### Part 1: parameters similar to Z-MERT
# target sentences file name (in this case, file name prefix)
-r ./ref.en
-rps 4 # references per sentence
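# NOTE (assumption, not stated in this file): since -rps is greater than 1, the -r value
# is treated as a file name prefix, so the references are typically expected in per-reference
# files such as ./ref.en.0, ./ref.en.1, ./ref.en.2, ./ref.en.3 -- check your own data layout.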
-p ./params.txt # parameter file
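# For illustration only (Z-MERT-style parameter file assumed; verify against your own params.txt):
# each line gives a feature name, its starting weight, whether it is optimizable (Opt/Fix),
# its allowed range, and its random-restart range, e.g.
#   lm                ||| 1.0  Opt  0.1   +Inf  +0.5  +1.5
#   phrasemodel pt 0  ||| 1.0  Opt  -Inf  +Inf  -1    +1
#   normalization = absval 1 lm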
#metric setting:
-m BLEU 4 closest
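# i.e. BLEU with a maximum n-gram length of 4 and the "closest" effective reference length
# used for the brevity penalty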
#-m TER nocase punc 5 5 joshua/zmert/tercom-0.7.25/tercom.7.25.jar 1
#-m TER-BLEU nocase punc 20 50 joshua/zmert/tercom-0.7.25/tercom.7.25.jar 1 4 closest
#-m METEOR en norm_yes keepPunc 2 # old Z-MERT METEOR interface (not working)
#-m Meteor en lowercase '0.5 1.0 0.5 0.5' 'exact stem synonym paraphrase' '1.0 0.5 0.5 0.5' #CMU meteor interface
-maxIt 30 # maximum PRO iterations
-cmd ./decoder_command # file containing commands to run decoder
-decOut ./output.nbest # N-best file produced by the decoder
-dcfg ./joshua.config # decoder config file
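# For illustration only (the command below is an assumption, not part of this config):
# ./decoder_command usually holds the shell line(s) that launch the decoder so that it
# reads ./joshua.config and writes its N-best list to ./output.nbest, e.g. something like
#   java -Xmx1g -cp $JOSHUA/bin joshua.decoder.JoshuaDecoder ./joshua.config ./dev.src ./output.nbest
# (adjust class path, class name, source file, and memory settings to your own setup)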
-N 300 # size of N-best list
-v 1 # verbosity level (0-2; higher value => more verbose)
-seed 12341234 # random number generator seed
### Part 2: PRO parameters
#-trainingMode can be 1, 2, 3, or 4:
#1: train dense feature weights only
#2: train dense & sparse feature weights together
#3: train sparse feature weights only (dense feature weights kept fixed)
#4: treat sparse features as one component (summary feature); train dense and summary feature weights together
-trainingMode 2
#-nbestFormat can be "sparse" or "dense"
#for trainingMode 1: either "dense" or "sparse"
#for trainingMode 2-4: use "sparse" format
-nbestFormat sparse #dense or sparse
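# For illustration only (assumed Moses/Joshua-style N-best layout; the feature names are made
# up -- check your decoder's actual output): a "dense" line lists raw feature values in a fixed
# order, e.g.
#   0 ||| the house is small ||| -12.3 4.0 -2.5 ||| -10.8
# while a "sparse" line names every firing feature as name=value pairs, e.g.
#   0 ||| the house is small ||| lm=-12.3 tm_pt_0=4.0 WordPenalty=-2.5 ||| -10.8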
#use one of the classifiers (and the corresponding parameter setting) below:
#1.Perceptron parameters
-classifierClass joshua.pro.ClassifierPerceptron
-classifierParams '30 0.5 0.0'
#2.MegaM parameters
#-classifierClass joshua.pro.ClassifierMegaM
#-classifierParams './megam_command ./megam_train.data ./megam_weights'
#3.Stanford Max-Ent parameters
#-classifierClass joshua.pro.ClassifierMaxEnt
#-classifierParams './maxent_prop_file'
#4.LibSVM parameters
#-classifierClass joshua.pro.ClassifierSVM
#-classifierParams './libsvm_command ./libsvm_train.data ./libsvm_train.data.model'
-Tau 8000 #number of candidate pairs sampled per sentence
-Xi 50 #number of top (largest metric-difference) candidate pairs kept per sentence
-interCoef 0.5 #linear interpolation coefficient, range [0,1]: 1 = use new weights only; 0 = keep previous weights only
-metricDiff 0.05 #minimum metric difference for a sampled pair to be accepted
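# A typical way to launch PRO training with this config might look like the line below; the
# main class name and memory flag are assumptions based on the Z-MERT-style interface, so
# adjust them to your Joshua build:
#   java -Xmx1g -cp $JOSHUA/bin joshua.pro.PRO PRO_config.txt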