###############################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
###############################################################################

##
## Default System Configuration Properties
##
#Directory containing the local properties file - used when running with the
#hadoop jar command
local.pirk.properties.dir=/root/

##
## Spark home directory for SparkLauncher
##
spark.home = /usr

##
## Data schema properties
##
## Each data schema should be specified in an XML file of the form:
##
##<schema>
##  <schemaName> name of the schema </schemaName>
##  <element>
##    <name> element name </name>
##    <type> class name or type name (if Java primitive type) of the element </type>
##    <isArray> true or false -- whether or not the schema element is an array within the data </isArray>
##    <partitioner> optional - Partitioner class for the element; defaults to the primitive Java type partitioner </partitioner>
##  </element>
##</schema>
##
## The type name must be one of the following: "byte", "short", "int", "long", "float",
## "double", "char", "string", "boolean"
##
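## For example, a hypothetical data schema file might look like the following
## (schema and element names are illustrative; the partitioner class is an
## assumed example of a non-default partitioner):
##
##<schema>
##  <schemaName> dnsSchema </schemaName>
##  <element>
##    <name> qname </name>
##    <type> string </type>
##    <isArray> false </isArray>
##  </element>
##  <element>
##    <name> srcIP </name>
##    <type> string </type>
##    <isArray> false </isArray>
##    <partitioner> org.apache.pirk.schema.data.partitioner.IPDataPartitioner </partitioner>
##  </element>
##</schema>
##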

#Comma-separated list of local data schema files to load; fully qualified file names
data.schemas = none

##
## Query schema properties
##
## Each query schema should be specified in an XML file of the form below;
## all items are treated in a case-insensitive manner:
##
##<schema>
##  <schemaName> name of the schema </schemaName>
##  <dataSchemaName> name of the data schema over which this query is run </dataSchemaName>
##  <selectorName> name of the element in the data schema that will be the selector </selectorName>
##  <elements>
##    <name> name of an element in the data schema to include in the query response </name>
##  </elements>
##  <filter> (optional) name of the filter class to use to filter the data </filter>
##  <filterNames>
##    <name> (optional) name of an element in the data schema to which the pre-processing filter is applied </name>
##  </filterNames>
##  <additional> (optional) additional fields for the query schema, in <key,value> pairs
##    <field>
##      <key> key corresponding to the field </key>
##      <value> value corresponding to the field </value>
##    </field>
##  </additional>
##</schema>
##
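## For example, a hypothetical query schema over the data schema sketched above
## (names are illustrative; the filter entries are optional and assume a
## stoplist-style filter class):
##
##<schema>
##  <schemaName> dnsQuery </schemaName>
##  <dataSchemaName> dnsSchema </dataSchemaName>
##  <selectorName> qname </selectorName>
##  <elements>
##    <name> srcIP </name>
##  </elements>
##  <filter> org.apache.pirk.schema.query.filter.StopListFilter </filter>
##  <filterNames>
##    <name> qname </name>
##  </filterNames>
##</schema>
##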

#Comma-separated list of local query schema files to load; fully qualified file names
query.schemas = none

##
## Properties for Elasticsearch compatibility
##

#ES host address - one Elasticsearch node in the cluster; may include a port specification
es.nodes=none

#Default HTTP/REST port used for connecting to Elasticsearch
es.port=9200
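
#For example (hypothetical host), the port may be given in either place:
# es.nodes = localhost:9200
#or:
# es.nodes = localhost
# es.port = 9200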

#Number of results/items returned by each individual scroll
es.scroll.size = 1000

#Whether elasticsearch-hadoop will treat empty fields as null
es.field.read.empty.as.null=yes


##
## Properties for functional testing
##

#Test index to create in ES (created and deleted within the tests)
#The final form needs to be: <host>:<port>/<test.es.index>
test.es.index = testindex

#Type of elements to insert in ES
test.es.type = pkt

#Elasticsearch resource location where test data is read from and written to.
#Requires the format <index>/<type>
test.es.resource = testindex/pkt

#Pathname in hdfs for the input JSON file used in testing
test.inputJSONFile = /tmp/testJSONInput

#Pathname in hdfs for the output file used in testing
test.outputHDFSFile = /tmp/testOutput

#Query input dir in hdfs for testing
test.queryInputDir = none

#Test stoplist file
test.stopListFile = /tmp/testStopListFile

#Whether or not we are running PIR testing (used as a flag to dump intermediate RDDs for checking)
#This should default to false; it is changed to true in the test suite, as applicable
pir.test = false

#HDFS output dir for PIR intermediate testing
#Should default to none; changed to a real hdfs path in the test suite, as applicable
pir.testOut = none


##
## Properties to enable/disable JNA-GMP modPow acceleration for Paillier
##

paillier.useGMPForModPow = true

# The JNA-GMP library, which we use to invoke the much faster, natively compiled GMP,
# can be called in a way that tries to make all modPow calls take a constant amount
# of time to execute. This slows down modPow performance (though it remains faster than
# Java's BigInteger.modPow()).
# If you are using this package in a multi-tenant computing environment and have
# concerns about other tenants being able to closely inspect the runtime of your
# software, you may want to enable this property.
paillier.GMPConstantTimeMode = false

# This property controls the more rigorous prime generation checks in PrimeMaker.java,
# which are based on NIST SP 800-56B and FIPS 186-4 (extra Miller-Rabin rounds, limits
# on how close the primes p and q can be, and bounds on the values of the primes).
# These checks slow down prime generation considerably.
pallier.FIPSPrimeGenerationChecks = true

## These properties control the secure random number generator algorithm and provider.
## You can specify just the algorithm, or both algorithm and provider. The system's
## default secure random is used when the algorithm is left unspecified.
pallier.secureRandom.algorithm=NativePRNG
#pallier.secureRandom.provider=SUN

##
## Properties for PIR query and response
##

#Number of bits to return when encoding/returning string values as part of return data elements
pir.stringBits = 64
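#e.g., 64 bits corresponds to 8 bytes of string data, i.e., at most the first
#8 characters of each string value in a one-byte-per-character encoding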

#Number of array elements to return when encoding/returning array-valued elements
pir.numReturnArrayElements = 2

#Default prime certainty -- the probability that a generated value is prime exceeds (1 - 1/2^certainty)
pir.primeCertainty = 128

#Fully qualified dir in hdfs of Query files
pir.queryInput = none

#Data input format type -- 'base' or 'elasticsearch' (given in the InputFormatsConst class)
pir.dataInputFormat = base

#Fully qualified name of input file/directory in hdfs; used if pir.dataInputFormat = base
pir.inputData = none

#InputFormat for the 'base' data input format type -- must extend BaseInputFormat
pir.baseInputFormat = none

#Elasticsearch-like query to use with the 'base' input format
pir.baseQuery = none

#ES resource for input data
pir.esResource = none

#ES query for input data
pir.esQuery = none
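#For example (hypothetical field and value), a URI-style query of the form
#supported by elasticsearch-hadoop: ?q=src_ip:55.55.55.55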

#Fully qualified name of output file in hdfs
pir.outputFile = none

#Fully qualified path in hdfs of the file containing stoplist terms
pir.stopListFile = stopListFile

#Number of reduce tasks
pir.numReduceTasks = 100

#Whether or not to use the local cache during PIR computations
pir.useLocalCache = true

#Whether or not to limit the hits for each query term
pir.limitHitsPerSelector = true

#Maximum number of hits to return for each query term, if pir.limitHitsPerSelector = true
pir.maxHitsPerSelector = 100

#Whether or not to embed the selector in the results for false positive reduction
pir.embedSelector = true

#Whether or not to generate and use the HDFS modular exponentiation lookup table
pir.useHDFSLookupTable = false

#Number of partitions to coalesce the input data into in Spark
pir.numDataPartitions = 1500

#MapReduce memory options
mapreduce.map.memory.mb = 3000
mapreduce.reduce.memory.mb = 3000
mapreduce.map.java.opts = -Xmx2800m
mapreduce.reduce.java.opts = -Xmx2800m
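#Note: keep each -Xmx heap size below its container size (here 2800m within
#3000 mb containers) so that JVM overhead beyond the heap still fits in the container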

#HDFS directory for the expLookupTable
pir.expDir = none

#Parallelism for expLookupTable creation in hdfs
pir.expCreationSplits = 600