Add Kubernetes deploy files. (#2265)

* Add Kubernetes deploy files.

* Make default zookeeper deployment have only 1 container.

* Move apiserver yaml config to gcp subdirectory.
diff --git a/deploy/kubernetes/gcp/apiserver.yaml b/deploy/kubernetes/gcp/apiserver.yaml
new file mode 100644
index 0000000..d364701
--- /dev/null
+++ b/deploy/kubernetes/gcp/apiserver.yaml
@@ -0,0 +1,42 @@
+##
+## Heron API server deployment
+##
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: heron-apiserver
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: heron-apiserver
+    spec:
+      containers:
+        - name: heron-apiserver
+          image: heron/heron:latest
+          command: ["sh", "-c"]
+          args:
+            - >-
+              heron-apiserver
+              --base-template kubernetes
+              --cluster kubernetes
+              -D heron.statemgr.connection.string=zookeeper:2181
+              -D heron.kubernetes.scheduler.uri=http://localhost:8001
+              -D heron.executor.docker.image=heron/heron:latest
+              -D heron.class.uploader=com.twitter.heron.uploader.gcs.GcsUploader
+              -D heron.uploader.gcs.bucket=<gcs-bucket-name>
+              -D heron.uploader.gcs.credentials_path=/heron/secret/<google-cloud-credentials-file>
+          volumeMounts:
+            - name: google-cloud-service-account
+              mountPath: /heron/secret/
+        - name: kubectl-proxy
+          image: heron/kubectl:latest
+          command: ["sh", "-c"]
+          args:
+            - >
+              kubectl proxy -p 8001
+      volumes:
+        - name: google-cloud-service-account
+          secret:
+            secretName: <google-cloud-secret-name>
diff --git a/deploy/kubernetes/tools.yaml b/deploy/kubernetes/tools.yaml
new file mode 100644
index 0000000..d0500fc
--- /dev/null
+++ b/deploy/kubernetes/tools.yaml
@@ -0,0 +1,55 @@
+##
+## Deployment for the Heron Tracker and UI
+##
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: heron-tracker
+  namespace: default
+spec:
+  template:
+    metadata:
+      labels:
+        app: heron-tracker
+    spec:
+      containers:
+        - name: heron-tracker
+          image: heron/heron:latest
+          ports:
+            - containerPort: 8080
+              name: api-port
+          command: ["sh", "-c"]
+          args:
+            - >-
+              heron-tracker
+              --type=zookeeper
+              --name=localzk
+              --hostport=zookeeper:2181
+              --rootpath="/heron"
+        - name: heron-ui
+          image: heron/heron:latest
+          ports:
+            - containerPort: 8081
+              name: app-port
+          command: ["sh", "-c"]
+          args:
+            - >-
+              heron-ui
+              --port=8081
+              --base_url=/api/v1/proxy/namespaces/default/services/heron-ui:8081
+---
+
+##
+## Service to expose the heron-ui
+##
+apiVersion: v1
+kind: Service
+metadata:
+  name: heron-ui
+spec:
+  selector:
+    app: heron-tracker
+  ports:
+    - protocol: TCP
+      port: 8081
+      targetPort: 8081
diff --git a/deploy/kubernetes/zookeeper.yaml b/deploy/kubernetes/zookeeper.yaml
new file mode 100644
index 0000000..f1d417e
--- /dev/null
+++ b/deploy/kubernetes/zookeeper.yaml
@@ -0,0 +1,132 @@
+##
+## Simple ZooKeeper deployment
+##
+
+## Define a storage class to ask for SSD
+## persistent volumes for ZK
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: ssd
+provisioner: kubernetes.io/gce-pd
+parameters:
+  type: pd-ssd
+---
+
+## Define a disruption budget to ensure at least
+## 1 ZK server is running at all times
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: zk-budget
+spec:
+  selector:
+    matchLabels:
+      app: zk
+  minAvailable: 1
+---
+
+## Define a StatefulSet for ZK servers
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+  name: zk
+  labels:
+    app: heron
+    component: zookeeper
+spec:
+  serviceName: zookeeper
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: heron
+        component: zookeeper
+
+    spec:
+      # Make sure multiple pods of ZK don't get scheduled on the
+      # same node, unless there are no other available nodes
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 1
+              podAffinityTerm:
+                labelSelector:
+                  matchExpressions:
+                    - key: "app"
+                      operator: In
+                      values:
+                        - zookeeper
+                topologyKey: "kubernetes.io/hostname"
+      containers:
+        - name: zookeeper
+          image: heron/heron:latest
+          command: ["sh", "-c"]
+          args:
+            - >
+              /opt/zookeeper/scripts/start-zookeeper.sh
+          ports:
+            - containerPort: 2181
+              name: client
+            - containerPort: 2888
+              name: server
+            - containerPort: 3888
+              name: leader-election
+          env:
+            - name: ZOOKEEPER_SERVERS
+              value: zk-0
+          readinessProbe:
+            exec:
+              command:
+                - "/opt/zookeeper/scripts/zookeeper-ruok.sh"
+            initialDelaySeconds: 5
+            timeoutSeconds: 5
+
+          livenessProbe:
+            exec:
+              command:
+                - "/opt/zookeeper/scripts/zookeeper-ruok.sh"
+            initialDelaySeconds: 15
+            timeoutSeconds: 5
+
+          volumeMounts:
+            - name: datadir
+              mountPath: /heron/data
+
+  volumeClaimTemplates:
+    - metadata:
+        name: datadir
+      spec:
+        accessModes: [ "ReadWriteOnce" ]
+        resources:
+          requests:
+            storage: 10Gi
+        storageClassName: ssd
+
+
+---
+
+##
+## Define the ZooKeeper headless service
+##
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+  name: zookeeper
+  labels:
+    app: heron
+    component: zookeeper
+spec:
+  ports:
+    - port: 2888
+      name: server
+    - port: 3888
+      name: leader-election
+    - port: 2181
+      name: client
+  clusterIP: None
+  selector:
+    app: heron
+    component: zookeeper