| # Licensed to the Apache Software Foundation (ASF) under one |
| # or more contributor license agreements. See the NOTICE file |
| # distributed with this work for additional information |
| # regarding copyright ownership. The ASF licenses this file |
| # to you under the Apache License, Version 2.0 (the |
| # "License"); you may not use this file except in compliance |
| # with the License. You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| |
# Compose project name; prefixes default container/volume/network names.
name: hive
| |
| services: |
| postgres: |
| image: postgres |
| restart: unless-stopped |
| container_name: postgres |
| hostname: postgres |
| environment: |
| POSTGRES_DB: 'metastore_db' |
| POSTGRES_USER: 'hive' |
| POSTGRES_PASSWORD: 'password' |
| ports: |
| - '5432:5432' |
| volumes: |
| - hive-db:/var/lib/postgresql |
| networks: |
| - hive |
| |
| metastore: |
| image: apache/hive:${HIVE_VERSION} |
| depends_on: |
| - postgres |
| restart: unless-stopped |
| container_name: metastore |
| hostname: metastore |
| environment: |
| DEFAULT_FS: "${DEFAULT_FS}" |
| HIVE_WAREHOUSE_PATH: "${HIVE_WAREHOUSE_PATH}" |
| HADOOP_CLASSPATH: /opt/hadoop/share/hadoop/tools/lib/* |
| DB_DRIVER: postgres |
| SERVICE_NAME: 'metastore' |
| SERVICE_OPTS: > |
| -Xmx1G |
| -Djavax.jdo.option.ConnectionDriverName=org.postgresql.Driver |
| -Djavax.jdo.option.ConnectionURL=jdbc:postgresql://postgres:5432/metastore_db |
| -Djavax.jdo.option.ConnectionUserName=hive |
| -Djavax.jdo.option.ConnectionPassword=password |
| |
| S3_ENDPOINT_URL: "${S3_ENDPOINT_URL}" |
| AWS_ACCESS_KEY_ID: "${AWS_ACCESS_KEY_ID}" |
| AWS_SECRET_ACCESS_KEY: "${AWS_SECRET_ACCESS_KEY}" |
| ports: |
| - '9083:9083' |
| volumes: |
| - warehouse:/opt/hive/data/warehouse |
| - type: bind |
| source: ${POSTGRES_LOCAL_PATH} |
| target: /opt/hive/lib/postgres.jar |
| # Mount local jars to a temporary staging area (Read-Only) |
| - ./jars:/tmp/ext-jars:ro |
| networks: |
| - hive |
| |
| hiveserver2: |
| image: apache/hive:${HIVE_VERSION} |
| depends_on: |
| - metastore |
| restart: unless-stopped |
| container_name: hiveserver2 |
| hostname: hiveserver2 |
| environment: |
| USER: hive |
| HADOOP_CLASSPATH: /opt/hadoop/share/hadoop/tools/lib/* |
| HIVE_SERVER2_THRIFT_PORT: 10000 |
| |
| # Directories shared between HiveServer2 and LLAP daemon |
| HIVE_SCRATCH_DIR: /opt/hive/scratch |
| HIVE_QUERY_RESULTS_CACHE_DIRECTORY: /opt/hive/scratch/_resultscache_ |
| |
| SERVICE_OPTS: >- |
| -Xmx1G |
| -Dhive.metastore.uris=thrift://metastore:9083 |
| |
| -Dhive.execution.mode=${HIVE_EXECUTION_MODE:-container} |
| -Dhive.zookeeper.quorum=${HIVE_ZOOKEEPER_QUORUM:-} |
| -Dhive.llap.daemon.service.hosts=${HIVE_LLAP_DAEMON_SERVICE_HOSTS:-} |
| IS_RESUME: 'true' |
| SERVICE_NAME: 'hiveserver2' |
| |
| S3_ENDPOINT_URL: "${S3_ENDPOINT_URL}" |
| AWS_ACCESS_KEY_ID: "${AWS_ACCESS_KEY_ID}" |
| AWS_SECRET_ACCESS_KEY: "${AWS_SECRET_ACCESS_KEY}" |
| ports: |
| - '10000:10000' |
| - '10002:10002' |
| volumes: |
| - warehouse:/opt/hive/data/warehouse |
| - scratch:/opt/hive/scratch |
| # Mount local jars to a temporary staging area (Read-Only) |
| - ./jars:/tmp/ext-jars:ro |
| networks: |
| - hive |
| |
| zookeeper: |
| profiles: |
| - llap |
| image: zookeeper:3.8.4 |
| container_name: zookeeper |
| hostname: zookeeper |
| restart: unless-stopped |
| ports: |
| - '2181:2181' |
| networks: |
| - hive |
| volumes: |
| - zookeeper_data:/data |
| - zookeeper_datalog:/datalog |
| - zookeeper_logs:/logs |
| |
  # TODO: Tez AM container (in the meantime, the HS2 (with local Tez AM) + LLAP daemon setup is working properly)
| # 1. Define and use a Tez AM image from HIVE-29419 or TEZ-4682 |
| # 2. Configure TezAM to use Zookeeper Llap Registry to discover the LLAP daemon |
| # 3. Configure HiveServer2 to use the Tez AM Zookeeper Registry to discover the Tez AM |
| # Prerequisites: |
  #      - tez-api 1.0.0-SNAPSHOT jar injected into HiveServer2 until Tez 1.0.0 is released
| # - make HIVE-29477 happen to let HiveServer2 use Tez external sessions |
| # 4. Define hadoop components here to be used by all the containers (working example can be found at TEZ-4682), currently a local volume |
| |
| llapdaemon: |
| profiles: |
| - llap |
| image: apache/hive:${HIVE_VERSION} |
| depends_on: |
| - zookeeper |
| restart: unless-stopped |
| environment: |
| USER: hive |
| SERVICE_NAME: 'llap' |
| |
| LLAP_MEMORY_MB: '1024' |
| LLAP_EXECUTORS: '1' |
| |
| HIVE_SCRATCH_DIR: /opt/hive/scratch |
| HIVE_QUERY_RESULTS_CACHE_DIRECTORY: /opt/hive/scratch/_resultscache_ |
| |
| LOCAL_DIRS: /tmp/llap-local |
| |
| LLAP_WEB_PORT: '15001' |
| LLAP_MANAGEMENT_RPC_PORT: '15004' |
| LLAP_SHUFFLE_PORT: '15551' |
| |
| DEFAULT_FS: "${DEFAULT_FS}" |
| S3_ENDPOINT_URL: "${S3_ENDPOINT_URL}" |
| AWS_ACCESS_KEY_ID: "${AWS_ACCESS_KEY_ID}" |
| AWS_SECRET_ACCESS_KEY: "${AWS_SECRET_ACCESS_KEY}" |
| volumes: |
| - warehouse:/opt/hive/data/warehouse |
| - scratch:/opt/hive/scratch |
| networks: |
| - hive |
| |
volumes:
  # Postgres data files (metastore schema).
  hive-db:
  # Hive warehouse, shared by metastore, hiveserver2, and llapdaemon.
  warehouse:
  # Scratch/results-cache dir shared by hiveserver2 and llapdaemon.
  scratch:
  # ZooKeeper volumes carry explicit names (no project-name prefix);
  # the three above keep compose's default "<project>_<key>" naming.
  zookeeper_data:
    name: zookeeper_data
  zookeeper_datalog:
    name: zookeeper_datalog
  zookeeper_logs:
    name: zookeeper_logs
| |
networks:
  # Single shared bridge network; fixed name (no project-name prefix).
  hive:
    name: hive