<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!--
Note that values can make use of environment variables by using variable
substitution, e.g. ${env.AWS_ACCESS_KEY_ID} will be replaced with the
AWS_ACCESS_KEY_ID environment variable.
If you are deploying in a containerized environment this can be handy
since you can have all the settings in here reference environment variables
and then set those variables appropriately for each environment;
environment variables are often easier to modify individually
than a config file.
-->
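<!--
For example, in a containerized deployment you could export the referenced
variables when starting the Drillbit container. This is only an illustrative
sketch; the image name and credential values are placeholders, not part of
this configuration:
docker run -e AWS_ACCESS_KEY_ID=AKIAEXAMPLE -e AWS_SECRET_ACCESS_KEY=secretExample apache/drill
Drill then substitutes those values wherever ${env.AWS_ACCESS_KEY_ID} and
${env.AWS_SECRET_ACCESS_KEY} appear in this file.
-->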
<!--
Credentials for the AWS S3 storage plugin can be set here using
fs.s3a.access.key and fs.s3a.secret.key properties.
-->
<!--
<property>
<name>fs.s3a.access.key</name>
<value>${env.AWS_ACCESS_KEY_ID}</value>
</property>
<property>
<name>fs.s3a.secret.key</name>
<value>${env.AWS_SECRET_ACCESS_KEY}</value>
</property>
-->
<!--
Credentials for the mongo storage plugin can be provided here instead of in the plugin configuration. Note
that to configure a plugin with the name "foo" you would set drill.exec.store.foo.username; the examples here
use the default plugin name of "mongo".
-->
<!--
<property>
<name>drill.exec.store.mongo.username</name>
<value>${env.MONGO_USERNAME}</value>
<description>
Username for mongo storage plugin
</description>
</property>
<property>
<name>drill.exec.store.mongo.password</name>
<value>${env.MONGO_PASSWORD}</value>
<description>
Password for mongo storage plugin.
</description>
</property>
-->
<!-- Credentials Provider Configuration -->
<!--
<property>
<name>hadoop.security.credential.provider.path</name>
<value>jceks://file/opt/drill/conf/credentials.jceks</value>
<description>
Use this property to specify one or more credential provider URIs
instead of configuring credentials in plaintext using the properties above.
To store credentials in the filesystem you can use a value like:
jceks://file/opt/drill/conf/credentials.jceks
You can also store credentials in S3 (except the S3 credentials themselves):
jceks://s3a@drill-credentials/credentials.jceks
You may specify multiple provider paths, comma-separated:
jceks://file/opt/drill/conf/credentials.jceks,jceks://s3a@drill-credentials/credentials.jceks
To create/set credentials, set this property, install Hadoop, and use the hadoop CLI:
export HADOOP_CONF_DIR=/opt/drill/conf
hadoop credential create fs.s3a.access.key
hadoop credential create fs.s3a.secret.key
hadoop credential create drill.exec.store.mongo.username -provider jceks://s3a@creds/mongo.jceks
hadoop credential create drill.exec.store.mongo.password -provider jceks://s3a@creds/mongo.jceks
Refer to the CredentialProvider API documentation for more details:
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/CredentialProviderAPI.html
</description>
</property>
<property>
<name>hadoop.security.credstore.java-keystore-provider.password-file</name>
<value>/opt/drill/conf/credentials-password.txt</value>
<description>
You may set this property to the name of a file containing a secret string that will be used to
encrypt / decrypt credentials. If this is not specified, a default password of "none" will be used.
This may provide added security if the credentials are stored in a different filesystem from the
password file itself. For example, if the credentials file were stored on S3 and an attacker accessed
it, they would still need access to this password file before they could make use of the credentials.
</description>
</property>
-->
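<!--
As a rough sketch, assuming the keystore and password-file paths used in the
example above, you could create the password file and then verify which
aliases the keystore holds from a shell (the password value is a placeholder):
echo -n 'changeMe' > /opt/drill/conf/credentials-password.txt
chmod 600 /opt/drill/conf/credentials-password.txt
export HADOOP_CONF_DIR=/opt/drill/conf
hadoop credential list -provider jceks://file/opt/drill/conf/credentials.jceks
-->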
<!--
Set this property to true to avoid caching of the S3 file system and its
configuration properties, so that when you add or update a property
(e.g. fs.s3a.secret.key) in the S3 storage plugin the new value takes effect.
-->
<!--
<property>
<name>fs.s3a.impl.disable.cache</name>
<value>true</value>
</property>
-->
<!--
HDFS Connector for Object Storage:
https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/hdfsconnector.htm
-->
<!--
<property>
<name>fs.oci.client.hostname</name>
<value>https://objectstorage.us-ashburn-1.oraclecloud.com</value>
<description>
The URL of the host endpoint. For example, https://www.example.com.
</description>
</property>
<property>
<name>fs.oci.client.auth.tenantId</name>
<value>ocid1.tenancy.oc1..exampleuniqueID</value>
<description>
The OCID of your tenancy. To get the value, see Required Keys and OCIDs:
https://docs.oracle.com/iaas/Content/API/Concepts/apisigningkey.htm
</description>
</property>
<property>
<name>fs.oci.client.auth.userId</name>
<value>ocid1.user.oc1..exampleuniqueID</value>
<description>
The OCID of the user calling the API. To get the value, see Required Keys and OCIDs.
</description>
</property>
<property>
<name>fs.oci.client.auth.fingerprint</name>
<value>20:3b:97:13:55:1c:5b:0d:d3:37:d8:50:4e:c5:3a:34</value>
<description>
The fingerprint for the key pair being used. To get the value, see Required Keys and OCIDs.
</description>
</property>
<property>
<name>fs.oci.client.auth.pemfilepath</name>
<value>/opt/drill/conf/oci_api_key.pem</value>
<description>
The full path and file name of the private key used for authentication. The file should be on the local file system.
</description>
</property>
-->
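<!--
The fs.oci scheme is provided by the OCI HDFS Connector linked above, which
is not bundled with Drill. As a sketch only (the jar name and destination
directory below are assumptions, not requirements of this file), the connector
jar would need to be on Drill's classpath before these properties take effect:
cp oci-hdfs-full-*.jar $DRILL_HOME/jars/3rdparty/
-->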
<!--
Use the next property to set a limit on the number of files that Drill
will list by recursing into a DFS directory tree. When the limit is
encountered the initiating operation will fail with an error. The
intended application of this limit is to allow admins to protect their
Drillbits from an errant or malicious SELECT * FROM dfs.huge_workspace
LIMIT 10 query, which will cause an OOM given a big enough workspace of
files. Defaults to 0 which means no limit.
-->
<!--
<property>
<name>drill.exec.recursive_file_listing_max_size</name>
<value>10000</value>
</property>
-->
</configuration>