###############################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
###############################################################################

##
## Required Properties
##

#dataInputFormat -- required -- 'base', 'elasticsearch', or 'standalone' -- Specifies the input format
pir.dataInputFormat=
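#Example (illustrative value): pir.dataInputFormat=base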

#outputFile -- required -- Fully qualified name of the output file in HDFS
pir.outputFile=
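#Example (hypothetical path): pir.outputFile=/user/pirk/output/queryResults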

#platform -- required -- 'mapreduce', 'spark', or 'standalone'
#Processing platform technology for the responder
platform=
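#Example (illustrative value): platform=mapreduce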

#queryInput -- required -- Fully qualified directory in HDFS containing the query files
pir.queryInput=
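#Example (hypothetical path): pir.queryInput=/user/pirk/queries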


##
## Optional Properties -- Leave commented out if not used or if keeping the default values
##

#inputData -- required if dataInputFormat = 'base'
#Fully qualified name of the input file/directory in HDFS
#pir.inputData=
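#Example (hypothetical path): pir.inputData=/user/pirk/input/data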

#dataSchemas -- optional -- Comma-separated list of data schema file names to load
#responder.dataSchemas=
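#Example (hypothetical file names): responder.dataSchemas=dataSchemaOne.xml,dataSchemaTwo.xml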

#querySchemas -- optional -- Comma-separated list of query schema file names to load
#responder.querySchemas=
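#Example (hypothetical file names): responder.querySchemas=querySchemaOne.xml,querySchemaTwo.xml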

#allowEmbeddedQuerySchemas -- 'true' or 'false'
#If true, allows embedded QuerySchemas for a query
#Defaults to 'false'
#pir.allowEmbeddedQuerySchemas=

#colMultReduceByKey -- 'true' or 'false' -- Spark only
#If true, uses reduceByKey when performing column multiplication; if false, uses groupByKey -> reduce
#Defaults to 'false'
#pir.colMultReduceByKey=

#baseInputFormat -- required if dataInputFormat = 'base'
#Fully qualified class name of the InputFormat to use when reading in the data; must extend BaseInputFormat
#pir.baseInputFormat=
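#Example (hypothetical class; must extend BaseInputFormat):
#pir.baseInputFormat=com.example.pirk.inputformat.CustomInputFormat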

#esQuery -- required if dataInputFormat = 'elasticsearch'
#Elasticsearch query to execute when reading the input data
#pir.esQuery=
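#Example (hypothetical query, using Elasticsearch URI-search syntax):
#pir.esQuery=?q=rcode:0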

#esResource -- required if dataInputFormat = 'elasticsearch'
#Elasticsearch resource where data is read and written; must have the format <index>/<type>
#pir.esResource=
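#Example (hypothetical index and type): pir.esResource=networkdata/dnsrecords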

#useHDFSLookupTable -- 'true' or 'false' -- Whether or not to generate and use the
#HDFS lookup table for modular exponentiation
#Defaults to 'false'
#pir.useHDFSLookupTable=

#baseQuery -- Elasticsearch-like query if using the 'base' input format --
#used to filter records in the RecordReader
#Defaults to ?q=*
#pir.baseQuery=
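#Example (hypothetical filter): pir.baseQuery=?q=qname:example.com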

#limitHitsPerSelector -- 'true' or 'false'
#Whether or not to limit the number of hits per selector
#Defaults to 'true'
#pir.limitHitsPerSelector=

#mapreduceMapJavaOpts -- Java options for each map task; sets the map task heap size via -Xmx
#Defaults to -Xmx2800m
#mapreduce.map.java.opts=

#mapreduceMapMemoryMb -- Amount of memory (in MB) to allocate per map task
#Defaults to 3000
#mapreduce.map.memory.mb=

#mapreduceReduceJavaOpts
#Java options for each reduce task; sets the reduce task heap size via -Xmx
#Defaults to -Xmx2800m
#mapreduce.reduce.java.opts=

#mapreduceReduceMemoryMb
#Amount of memory (in MB) to allocate per reduce task
#Defaults to 3000
#mapreduce.reduce.memory.mb=
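#Example (illustrative values matching the documented defaults; the -Xmx heap is
#typically set somewhat below the corresponding memory.mb so the JVM fits within
#its container allocation):
#mapreduce.map.java.opts=-Xmx2800m
#mapreduce.map.memory.mb=3000
#mapreduce.reduce.java.opts=-Xmx2800m
#mapreduce.reduce.memory.mb=3000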

#stopListFile -- optional (unless using the StopListFilter) -- Fully qualified file in HDFS
#containing stoplist terms; used by the StopListFilter
#pir.stopListFile=
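#Example (hypothetical path): pir.stopListFile=/user/pirk/stoplists/stopListFile.txt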

#useLocalCache -- 'true' or 'false'
#Whether or not to use the local cache for modular exponentiation
#Defaults to 'true'
#pir.useLocalCache=

#useModExpJoin -- 'true' or 'false' -- Spark only
#Whether or not to pre-compute the modular exponentiation table and join it to the data
#partitions when performing the encrypted row calculations
#Defaults to 'false'
#pir.useModExpJoin=

#numReduceTasks -- optional -- Number of reduce tasks
#pir.numReduceTasks=

#numColMultPartitions -- optional -- Spark only
#Number of partitions to use when performing column multiplication
#pir.numColMultPartitions=

#maxHitsPerSelector -- optional -- Maximum number of hits encrypted per selector
#pir.maxHitsPerSelector=
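#Example (illustrative value; typically paired with limitHitsPerSelector = 'true'):
#pir.maxHitsPerSelector=1000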

#numDataPartitions -- optional -- Number of partitions for the input data
#pir.numDataPartitions=

#numExpLookupPartitions -- optional -- Number of partitions for the modular exponentiation lookup table
#pir.numExpLookupPartitions=