| # Licensed to the Apache Software Foundation (ASF) under one |
| # or more contributor license agreements. See the NOTICE file |
| # distributed with this work for additional information |
| # regarding copyright ownership. The ASF licenses this file |
| # to you under the Apache License, Version 2.0 (the |
| # "License"); you may not use this file except in compliance |
| # with the License. You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, |
| # software distributed under the License is distributed on an |
| # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| # KIND, either express or implied. See the License for the |
| # specific language governing permissions and limitations |
| # under the License. |
| |
# Pig default configuration file. All values can be overridden by pig.properties and command-line arguments.
| # see bin/pig -help |
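#
# For example, individual properties can also be passed on the command line with -D
# (placed before any Pig-specific options) or loaded from a separate file with
# -P/-propertyFile; the script name and file paths below are placeholders:
#   pig -Dpig.temp.dir=/tmp/mytmp script.pig
#   pig -P myproperties.txt script.pig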
| |
| # brief logging (no timestamps) |
| brief=false |
| |
| #debug level, INFO is default |
| debug=INFO |
| |
#verbose: print all log messages to the screen (by default only INFO and above are printed to the screen)
| verbose=false |
| |
| #exectype local|mapreduce, mapreduce is default |
| exectype=mapreduce |
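#
# A single run can also be switched to local mode from the command line
# (the script name is a placeholder):
#   pig -x local script.pig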
| |
| #Enable insertion of information about script into hadoop job conf |
| pig.script.info.enabled=true |
| |
| #Do not spill temp files smaller than this size (bytes) |
| pig.spill.size.threshold=5000000 |
| #EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes) |
| #This should help reduce the number of files being spilled. |
| pig.spill.gc.activation.size=40000000 |
| |
#The following two parameters are used to estimate the number of reducers
| pig.exec.reducers.bytes.per.reducer=1000000000 |
| pig.exec.reducers.max=999 |
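#
# Roughly, when no parallelism is requested explicitly, the estimate is:
#   number of reducers = MIN(pig.exec.reducers.max,
#                            total input size in bytes / pig.exec.reducers.bytes.per.reducer)
# e.g. ~10 GB of input with the defaults above yields about 10 reducers.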
| |
| #Temporary location to store the intermediate data. |
| pig.temp.dir=/tmp/ |
| |
| #Threshold for merging FRJoin fragment files |
| pig.files.concatenation.threshold=100 |
pig.optimistic.files.concatenation=false
| |
#Set to true to disable the Hadoop counters Pig uses to track records per input/output
pig.disable.counter=false
| |
#SQL engine used for the sql command (currently only hcat, i.e. HCatalog, is supported)
pig.sql.type=hcat
| |
#Whether the output committer supports task recovery after an application master restart
pig.output.committer.recovery.support=false
| |
#Reader used to determine the size of job outputs for statistics
pig.stats.output.size.reader=org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.FileBasedOutputSizeReader
#Store functions for which the file-based output size reader above cannot be used
pig.stats.output.size.reader.unsupported=org.apache.pig.builtin.mock.Storage,org.apache.hcatalog.pig.HCatStorer,org.apache.hive.hcatalog.pig.HCatStorer,org.apache.pig.piggybank.storage.DBStorage
| |
#Store functions that are not compatible with the Tez union optimization
pig.tez.opt.union.unsupported.storefuncs=org.apache.hcatalog.pig.HCatStorer,org.apache.hive.hcatalog.pig.HCatStorer,org.apache.pig.piggybank.storage.DBStorage,org.apache.pig.piggybank.storage.MultiStorage
| |
#Load functions whose data should only be read once, even when sorting
pig.sort.readonce.loadfuncs=org.apache.pig.backend.hadoop.hbase.HBaseStorage,org.apache.pig.backend.hadoop.accumulo.AccumuloStorage
| |
#Publish Pig job information to the YARN Application Timeline Server (ATS)
pig.ats.enabled=true
| |
#Let Pig configure the Tez application master memory automatically
pig.tez.configure.am.memory=true