Replace our own Kafka image with ches/kafka

I believe our own Kafka image was based on this anyway.

- Removed redo target
- Removed kafka from settings.gradle
- Removed the services folder :tada:
diff --git a/services/kafka/Dockerfile b/services/kafka/Dockerfile
deleted file mode 100644
index e6711c8..0000000
--- a/services/kafka/Dockerfile
+++ /dev/null
@@ -1,48 +0,0 @@
-# Builds Apache Kafka 0.9.0.0 from binary distribution.
-# 
-# Runs on Oracle Java 8 and a Ubuntu 14.04
-#
-FROM scala
-
-# kludge for https://github.com/docker/docker/issues/14203
-ENV DOCKER_FIX '                                             '
-
-# primary, jmx, ras
-EXPOSE 9092 7203
-
-# Install Kafka Stuff
-RUN mkdir /kafka /data
-ENV KAFKA_RELEASE 0.10.0.0
-ENV KAFKA_SCALA_VERSION 2.11
-ENV KAFKA_RELEASE_ARCHIVE kafka_${KAFKA_SCALA_VERSION}-${KAFKA_RELEASE}.tgz
-
-# Download Kafka binary distribution
-#
-# Install npm packages globally before doing ADDs which ruin docker cache
-# Check artifact digest integrity
-# Install Kafka to /kafka
-
-WORKDIR /tmp
-RUN wget --no-verbose http://www.us.apache.org/dist/kafka/${KAFKA_RELEASE}/${KAFKA_RELEASE_ARCHIVE} && \
-    wget --no-verbose https://dist.apache.org/repos/dist/release/kafka/${KAFKA_RELEASE}/${KAFKA_RELEASE_ARCHIVE}.md5  && \
-  echo VERIFY CHECKSUM: && \
-  gpg --print-md MD5 ${KAFKA_RELEASE_ARCHIVE} 2>/dev/null && \
-  cat ${KAFKA_RELEASE_ARCHIVE}.md5 && \
-  tar -zx -C /kafka --strip-components=1 -f ${KAFKA_RELEASE_ARCHIVE} && \
-  rm -rf kafka_*
-
-ADD config /kafka/config
-ADD start.sh /start.sh
-
-# Set up a user to run Kafka
-RUN chmod uog+rx /start.sh && \
-  groupadd kafka && \
-  useradd -d /kafka -g kafka -s /bin/false kafka && \
-  chown -R kafka:kafka /kafka /data /logs
-USER kafka
-ENV PATH /kafka/bin:$PATH
-WORKDIR /kafka
-
-VOLUME [ "/data", "/logs" ]
-
-CMD ["/start.sh"]
diff --git a/services/kafka/build.gradle b/services/kafka/build.gradle
deleted file mode 100644
index 043bf38..0000000
--- a/services/kafka/build.gradle
+++ /dev/null
@@ -1,3 +0,0 @@
-ext.dockerImageName = 'kafka'
-apply from: '../../gradle/docker.gradle'
-distDocker.dependsOn ':common:scala:distDocker'
diff --git a/services/kafka/config/log4j.properties b/services/kafka/config/log4j.properties
deleted file mode 100644
index 6197c5b..0000000
--- a/services/kafka/config/log4j.properties
+++ /dev/null
@@ -1,78 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kafka.logs.dir=/logs
-
-log4j.rootLogger=INFO, stdout
-
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
-log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
-log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
-log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.cleanerAppender.File=log-cleaner.log
-log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
-log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-# Turn on all our debugging info
-#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
-#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
-#log4j.logger.kafka.perf=DEBUG, kafkaAppender
-#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
-#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
-log4j.logger.kafka=INFO, kafkaAppender
-
-log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
-log4j.additivity.kafka.network.RequestChannel$=false
-
-#log4j.logger.kafka.network.Processor=TRACE, requestAppender
-#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
-#log4j.additivity.kafka.server.KafkaApis=false
-log4j.logger.kafka.request.logger=WARN, requestAppender
-log4j.additivity.kafka.request.logger=false
-
-log4j.logger.kafka.controller=TRACE, controllerAppender
-log4j.additivity.kafka.controller=false
-
-log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
-log4j.additivity.kafka.log.LogCleaner=false
-
-log4j.logger.state.change.logger=TRACE, stateChangeAppender
-log4j.additivity.state.change.logger=false
diff --git a/services/kafka/config/server.properties.default b/services/kafka/config/server.properties.default
deleted file mode 100644
index 0800d37..0000000
--- a/services/kafka/config/server.properties.default
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id={{BROKER_ID}}
-auto.leader.rebalance.enable=true
-
-# Replication
-auto.create.topics.enable=true
-default.replication.factor=1
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
-# may not be what you want.
-advertised.host.name={{EXPOSED_HOST}}
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port={{PORT}}
-advertised.port={{EXPOSED_PORT}}
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/data
-log.dirs=/data
-
-# The number of logical partitions per topic per server. More partitions allow greater parallelism
-# for consumption, but also mean more files.
-num.partitions=1
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-############################# Zookeeper #############################
-
-# Zk connection string (see zk docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect={{ZOOKEEPER_IP}}:{{ZOOKEEPER_PORT}}{{CHROOT}}
-zookeeper.connection.timeout.ms=10000
-controlled.shutdown.enable=true
-zookeeper.session.timeout.ms=10000
-
diff --git a/services/kafka/config/tools-log4j.properties b/services/kafka/config/tools-log4j.properties
deleted file mode 100644
index 10cd6c2..0000000
--- a/services/kafka/config/tools-log4j.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-log4j.rootLogger=WARN, stdout
-
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
diff --git a/services/kafka/config/zookeeper.properties b/services/kafka/config/zookeeper.properties
deleted file mode 100644
index c38186d..0000000
--- a/services/kafka/config/zookeeper.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper
-# the port at which the clients will connect
-clientPort=2181
-# disable the per-ip limit on the number of connections since this is a non-production config
-maxClientCnxns=0
diff --git a/services/kafka/start.sh b/services/kafka/start.sh
deleted file mode 100755
index fe2bb1d..0000000
--- a/services/kafka/start.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-echo "Preparing to start kafka"
-
-# If a ZooKeeper container is linked with the alias `zookeeper`, use it.
-# TODO Service discovery otherwise
-[ -n "$ZOOKEEPER_PORT_2181_TCP_ADDR" ] && ZOOKEEPER_IP=$ZOOKEEPER_PORT_2181_TCP_ADDR
-[ -n "$ZOOKEEPER_PORT_2181_TCP_PORT" ] && ZOOKEEPER_PORT=$ZOOKEEPER_PORT_2181_TCP_PORT
-
-IP=$(cat /etc/hosts | head -n1 | awk '{print $1}')
-PORT=9092
-
-cat /kafka/config/server.properties.default \
-  | sed "s|{{ZOOKEEPER_IP}}|${ZOOKEEPER_IP}|g" \
-  | sed "s|{{ZOOKEEPER_PORT}}|${ZOOKEEPER_PORT:-2181}|g" \
-  | sed "s|{{BROKER_ID}}|${BROKER_ID:-0}|g" \
-  | sed "s|{{CHROOT}}|${CHROOT:-}|g" \
-  | sed "s|{{EXPOSED_HOST}}|${EXPOSED_HOST:-$IP}|g" \
-  | sed "s|{{PORT}}|${PORT:-9092}|g" \
-  | sed "s|{{EXPOSED_PORT}}|${EXPOSED_PORT:-9092}|g" \
-   > /kafka/config/server.properties
-
-echo "Environment"
-echo "-----------"
-echo "IP = $IP"
-echo "PORT = $PORT"
-echo "CLASSPATH = $CLASSPATH"
-echo "JMX_PORT = $JMX_PORT"
-echo "ZOOKEEPER_IP = $ZOOKEEPER_IP"
-echo "ZOOKEEPER_PORT = $ZOOKEEPER_PORT"
-
-echo ""
-echo "Starting kafka"
-echo "--------------"
-/kafka/bin/kafka-server-start.sh /kafka/config/server.properties
-
diff --git a/settings.gradle b/settings.gradle
index 3927fb5..3d7fb39 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -11,8 +11,6 @@
 include 'core:swift3Action'
 include 'core:javaAction'
 
-include 'services:kafka'
-
 include 'tools:cli'
 
 include 'sdk:docker'