# Pig configuration file. All values can be overwritten by command line arguments.
# log4jconf log4j configuration file
# log4jconf=./conf/log4j.properties
# a file that contains pig script
#file=
# load jarfile, colon separated
#jar=
# verbose: print all log messages to the screen (by default only INFO and above are printed)
#verbose=true
#exectype local|mapreduce, mapreduce is default
#exectype=local
#pig.logfile=
#Do not spill temp files smaller than this size (bytes)
#pig.spill.size.threshold=5000000
# EXPERIMENTAL: activate garbage collection when spilling a file bigger than this size (bytes).
# This should help reduce the number of files being spilled.
#pig.spill.gc.activation.size=40000000
#the following two parameters are to help estimate the reducer number
#pig.exec.reducers.bytes.per.reducer=1000000000
#pig.exec.reducers.max=999
# Use this option only when your Pig job would otherwise die because it
# uses more counters than the limit configured in Hadoop.
#pig.disable.counter=true