misc user doc clarifications
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
index 0468942..33699a4 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
@@ -130,6 +130,13 @@
 
   <DT>num_iterations</DT>
   <DD>INTEGER.  Number of iterations to train.
+
+    @note
+    This parameter is different from the number of passes over the dataset,
+    which is commonly referred to as the number of epochs.  Since MADlib operates
+    in a distributed system, the total number of passes over the dataset is
+    equal to 'num_iterations' multiplied by the 'epochs' value specified in the
+    Keras fit parameters.
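+    For example, if 'num_iterations' is 10 and the Keras fit parameters include
+    'epochs=2', then the dataset is passed over 10 x 2 = 20 times in total.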
   </DD>
 
   <DT>use_gpus (optional)</DT>
@@ -1016,18 +1023,18 @@
 <pre class="result">
  mst_key | model_id |                                 compile_params                                  |      fit_params       |  model_type  |  model_size  | metrics_elapsed_time | metrics_type | training_metrics_final | training_loss_final |  training_metrics   |    training_loss    | validation_metrics_final | validation_loss_final | validation_metrics | validation_loss
 ---------+----------+---------------------------------------------------------------------------------+-----------------------+--------------+--------------+----------------------+--------------+------------------------+---------------------+---------------------+---------------------+--------------------------+-----------------------+--------------------+-----------------
-       9 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.189763069152832}  | {accuracy}   |         0.983333349228 |      0.102392569184 | {0.983333349227905} | {0.102392569184303} |                          |                       |                    |
-       4 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.170287847518921}  | {accuracy}   |         0.975000023842 |      0.159002527595 | {0.975000023841858} | {0.159002527594566} |                          |                       |                    |
-       3 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.165465116500854}  | {accuracy}   |         0.966666638851 |       0.10245500505 | {0.966666638851166} | {0.102455005049706} |                          |                       |                    |
-      10 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.199872970581055}  | {accuracy}   |         0.941666662693 |       0.12242924422 | {0.941666662693024} | {0.122429244220257} |                          |                       |                    |
-       5 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.16815185546875}   | {accuracy}   |         0.883333325386 |      0.437314987183 | {0.883333325386047} | {0.437314987182617} |                          |                       |                    |
-      11 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.430488109588623}  | {accuracy}   |         0.858333349228 |      0.400548309088 | {0.858333349227905} | {0.400548309087753} |                          |                       |                    |
-       6 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.154508113861084}  | {accuracy}   |         0.683333337307 |      0.634458899498 | {0.683333337306976} | {0.634458899497986} |                          |                       |                    |
-      12 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.192011833190918}  | {accuracy}   |         0.683333337307 |      0.792817175388 | {0.683333337306976} | {0.792817175388336} |                          |                       |                    |
-       2 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.241909980773926}  | {accuracy}   |         0.641666650772 |      0.736794412136 | {0.641666650772095} | {0.736794412136078} |                          |                       |                    |
-       7 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.186789035797119}  | {accuracy}   |         0.358333319426 |       1.09771859646 | {0.358333319425583} | {1.09771859645844}  |                          |                       |                    |
-       8 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.186538934707642}  | {accuracy}   |         0.358333319426 |        1.1002266407 | {0.358333319425583} | {1.10022664070129}  |                          |                       |                    |
-       1 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.159209012985229}  | {accuracy}   |         0.324999988079 |       10.8797140121 | {0.324999988079071} | {10.879714012146}   |                          |                       |                    |
+       9 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {119.42963886261}    | {accuracy}   |         0.983333349228 |       0.07286978513 | {0.983333349227905} | {0.072869785130024} |                          |                       |                    |
+      10 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {118.485460996628}   | {accuracy}   |         0.975000023842 |     0.0798489004374 | {0.975000023841858} | {0.079848900437355} |                          |                       |                    |
+       4 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {118.707404851913}   | {accuracy}   |         0.975000023842 |      0.143356323242 | {0.975000023841858} | {0.143356323242188} |                          |                       |                    |
+      11 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {118.224883794785}   | {accuracy}   |         0.958333313465 |      0.636615753174 | {0.958333313465118} | {0.636615753173828} |                          |                       |                    |
+       2 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {117.732690811157}   | {accuracy}   |         0.925000011921 |      0.161811202765 | {0.925000011920929} | {0.161811202764511} |                          |                       |                    |
+       5 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {120.357484817505}   | {accuracy}   |         0.833333313465 |        0.5542948246 | {0.833333313465118} | {0.55429482460022}  |                          |                       |                    |
+       3 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {118.928852796555}   | {accuracy}   |         0.824999988079 |      0.301002770662 | {0.824999988079071} | {0.301002770662308} |                          |                       |                    |
+       6 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {120.566634893417}   | {accuracy}   |         0.816666662693 |      0.875298440456 | {0.816666662693024} | {0.87529844045639}  |                          |                       |                    |
+      12 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {119.182703018188}   | {accuracy}   |         0.774999976158 |      0.785651266575 | {0.774999976158142} | {0.78565126657486}  |                          |                       |                    |
+       1 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {119.643137931824}   | {accuracy}   |         0.508333325386 |      0.762569189072 | {0.508333325386047} | {0.762569189071655} |                          |                       |                    |
+       7 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {120.15305685997}    | {accuracy}   |         0.333333343267 |       1.09794270992 | {0.333333343267441} | {1.09794270992279}  |                          |                       |                    |
+       8 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {119.911739826202}   | {accuracy}   |         0.333333343267 |       1.10344016552 | {0.333333343267441} | {1.10344016551971}  |                          |                       |                    |
 (12 rows)
 </pre>
 
@@ -1175,20 +1182,20 @@
 SELECT * FROM iris_multi_model_info ORDER BY training_metrics_final DESC, training_loss_final;
 </pre>
 <pre class="result">
- mst_key | model_id |                                 compile_params                                  |      fit_params       |  model_type  |  model_size  |                           metrics_elapsed_time                            | metrics_type | training_metrics_final | training_loss_final |                             training_metrics                              |                               training_loss                                | validation_metrics_final | validation_loss_final |                            validation_metrics                             |                              validation_loss
----------+----------+---------------------------------------------------------------------------------+-----------------------+--------------+--------------+---------------------------------------------------------------------------+--------------+------------------------+---------------------+---------------------------------------------------------------------------+----------------------------------------------------------------------------+--------------------------+-----------------------+---------------------------------------------------------------------------+----------------------------------------------------------------------------
-      10 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.19480299949646,0.184041976928711,0.191921949386597,0.24904990196228}   | {accuracy}   |         0.975000023842 |      0.107221193612 | {0.975000023841858,0.883333325386047,0.858333349227905,0.975000023841858} | {0.348870366811752,0.250929206609726,0.251643180847168,0.107221193611622}  |                        1 |       0.0745402574539 | {1,0.933333337306976,0.933333337306976,1}                                 | {0.351353526115417,0.183036848902702,0.160531669855118,0.0745402574539185}
-       3 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.168098926544189,0.157669067382812,0.196551084518433,0.159118890762329} | {accuracy}   |         0.925000011921 |       0.15261271596 | {0.883333325386047,0.808333337306976,0.958333313465118,0.925000011920929} | {0.323819071054459,0.359202563762665,0.145684525370598,0.152612715959549}  |           0.966666638851 |       0.0909686908126 | {0.866666674613953,0.666666686534882,1,0.966666638851166}                 | {0.349343299865723,0.458904296159744,0.113910958170891,0.0909686908125877}
-      11 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.196630954742432,0.182199001312256,0.186440944671631,0.203809022903442} | {accuracy}   |         0.933333337307 |       0.50553715229 | {0.725000023841858,0.758333325386047,0.858333349227905,0.933333337306976} | {0.884528338909149,0.746422350406647,0.588649451732635,0.505537152290344}  |           0.933333337307 |        0.552737057209 | {0.600000023841858,0.699999988079071,0.800000011920929,0.933333337306976} | {0.909559547901154,0.794417083263397,0.645792782306671,0.552737057209015}
-       9 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.243091106414795,0.183151960372925,0.201867818832397,0.182805061340332} | {accuracy}   |         0.891666650772 |      0.208901122212 | {0.850000023841858,0.958333313465118,0.975000023841858,0.891666650772095} | {0.408110946416855,0.144096985459328,0.0838083922863007,0.20890112221241}  |           0.866666674614 |        0.270807653666 | {0.733333349227905,0.933333337306976,0.966666638851166,0.866666674613953} | {0.427353918552399,0.152344897389412,0.0704043209552765,0.270807653665543}
-       5 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.15792989730835,0.171962976455688,0.226946830749512,0.161045074462891}  | {accuracy}   |         0.850000023842 |      0.480257481337 | {0.616666674613953,0.683333337306976,0.75,0.850000023841858}              | {0.948418378829956,0.743716657161713,0.534102499485016,0.480257481336594}  |           0.800000011921 |        0.515601098537 | {0.533333361148834,0.600000023841858,0.600000023841858,0.800000011920929} | {0.998877584934235,0.809476494789124,0.576340734958649,0.515601098537445}
-       8 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.184230089187622,0.20333194732666,0.191611051559448,0.187481880187988}  | {accuracy}   |         0.883333325386 |      0.341951817274 | {0.658333361148834,0.941666662693024,0.958333313465118,0.883333325386047} | {0.442611187696457,0.403654724359512,0.0869002863764763,0.341951817274094} |           0.800000011921 |        0.407213389874 | {0.766666650772095,0.899999976158142,0.966666638851166,0.800000011920929} | {0.354744076728821,0.380748003721237,0.0524398759007454,0.407213389873505}
-       2 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.169033050537109,0.160851001739502,0.172677993774414,0.166024923324585} | {accuracy}   |         0.641666650772 |      0.474238961935 | {0.899999976158142,0.949999988079071,0.783333361148834,0.641666650772095} | {0.255419433116913,0.1908118724823,0.767927944660187,0.474238961935043}    |           0.766666650772 |        0.426240265369 | {0.966666638851166,0.966666638851166,0.833333313465118,0.766666650772095} | {0.225459188222885,0.171880543231964,0.588224768638611,0.426240265369415}
-       4 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.163635015487671,0.217028856277466,0.412152051925659,0.163925886154175} | {accuracy}   |         0.841666638851 |      0.351104289293 | {0.866666674613953,0.916666686534882,0.941666662693024,0.841666638851166} | {0.341254085302353,0.210138097405434,0.152032792568207,0.351104289293289}  |           0.733333349228 |        0.462497830391 | {0.800000011920929,0.966666638851166,0.933333337306976,0.733333349227905} | {0.370902478694916,0.168067514896393,0.152486190199852,0.46249783039093}
-       1 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.160864114761353,0.515794038772583,0.170866966247559,0.162554025650024} | {accuracy}   |         0.774999976158 |      0.372270941734 | {0.608333349227905,0.941666662693024,0.774999976158142,0.774999976158142} | {0.541361212730408,0.23515048623085,0.341034829616547,0.372270941734314}   |           0.699999988079 |        0.499996572733 | {0.733333349227905,0.966666638851166,0.699999988079071,0.699999988079071} | {0.477739661931992,0.209625095129013,0.454505026340485,0.499996572732925}
-      12 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.184638977050781,0.180385828018188,0.206297159194946,0.186070919036865} | {accuracy}   |         0.691666662693 |      0.721061587334 | {0.25,0.675000011920929,0.733333349227905,0.691666662693024}              | {0.992778599262238,0.875843703746796,0.761732041835785,0.721061587333679}  |           0.600000023842 |        0.736407101154 | {0.266666680574417,0.633333325386047,0.733333349227905,0.600000023841858} | {0.996518731117249,0.881480932235718,0.763313174247742,0.736407101154327}
-       6 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.154546976089478,0.16751503944397,0.162903070449829,0.158049821853638}  | {accuracy}   |         0.683333337307 |        0.7804261446 | {0.358333319425583,0.358333319425583,0.675000011920929,0.683333337306976} | {1.20112609863281,0.947584271430969,0.819790959358215,0.780426144599915}   |           0.600000023842 |        0.848570525646 | {0.233333334326744,0.233333334326744,0.566666662693024,0.600000023841858} | {1.2944587469101,1.01824104785919,0.891107201576233,0.84857052564621}
-       7 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.19166898727417,0.202822923660278,0.192266941070557,0.188740015029907}  | {accuracy}   |         0.358333319426 |       1.11373853683 | {0.358333319425583,0.358333319425583,0.358333319425583,0.358333319425583} | {1.10469853878021,1.10379803180695,1.1021580696106,1.11373853683472}       |           0.233333334327 |         1.16589236259 | {0.233333334326744,0.233333334326744,0.233333334326744,0.233333334326744} | {1.15103197097778,1.14804196357727,1.13999223709106,1.1658923625946}
+ mst_key | model_id |                                 compile_params                                  |      fit_params       |  model_type  |  model_size  |                         metrics_elapsed_time                          | metrics_type | training_metrics_final | training_loss_final |                             training_metrics                              |                               training_loss                               | validation_metrics_final | validation_loss_final |                            validation_metrics                             |                              validation_loss
+---------+----------+---------------------------------------------------------------------------------+-----------------------+--------------+--------------+-----------------------------------------------------------------------+--------------+------------------------+---------------------+---------------------------------------------------------------------------+---------------------------------------------------------------------------+--------------------------+-----------------------+---------------------------------------------------------------------------+---------------------------------------------------------------------------
+       4 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {37.0420558452606,78.2046208381653,116.242669820786,134.287139892578} | {accuracy}   |         0.975000023842 |      0.165132269263 | {0.75,0.958333313465118,0.958333313465118,0.975000023841858}              | {0.618549585342407,0.319452553987503,0.223872095346451,0.165132269263268} |           0.966666638851 |        0.213689729571 | {0.733333349227905,0.933333337306976,0.933333337306976,0.966666638851166} | {0.683791160583496,0.370491921901703,0.255890935659409,0.213689729571342}
+       2 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {36.3931469917297,77.5780539512634,115.430645942688,133.599857807159} | {accuracy}   |         0.966666638851 |      0.277698725462 | {0.591666638851166,0.966666638851166,0.666666686534882,0.966666638851166} | {0.634598553180695,0.334936827421188,0.615665555000305,0.27769872546196}  |           0.966666638851 |         0.34405490756 | {0.5,0.966666638851166,0.566666662693024,0.966666638851166}               | {0.643225967884064,0.41021603345871,0.805291295051575,0.344054907560349}
+      10 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {36.8482949733734,78.0155048370361,115.83317399025,134.079672813416}  | {accuracy}   |         0.958333313465 |      0.122385449708 | {0.883333325386047,0.941666662693024,0.858333349227905,0.958333313465118} | {0.291894346475601,0.146935686469078,0.270052850246429,0.122385449707508} |           0.933333337307 |        0.181496843696 | {0.766666650772095,0.866666674613953,0.899999976158142,0.933333337306976} | {0.395013928413391,0.245234906673431,0.301119148731232,0.181496843695641}
+       3 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {37.2318170070648,78.3925468921661,116.45490694046,134.491376876831}  | {accuracy}   |         0.941666662693 |      0.193545326591 | {0.966666638851166,0.941666662693024,0.941666662693024,0.941666662693024} | {0.39665362238884,0.213271111249924,0.190151125192642,0.193545326590538}  |           0.933333337307 |        0.151459023356 | {1,0.966666638851166,0.933333337306976,0.933333337306976}                 | {0.464315593242645,0.198051139712334,0.138570576906204,0.151459023356438}
+       9 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {37.6678929328918,78.820240020752,116.939878940582,134.959810972214}  | {accuracy}   |         0.925000011921 |      0.192344605923 | {0.824999988079071,0.774999976158142,0.966666638851166,0.925000011920929} | {0.434513121843338,0.326292037963867,0.131333693861961,0.192344605922699} |           0.899999976158 |        0.209528595209 | {0.800000011920929,0.766666650772095,0.966666638851166,0.899999976158142} | {0.52033931016922,0.344535797834396,0.170280396938324,0.209528595209122}
+       8 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {38.0689258575439,79.4995639324188,117.36315202713,135.380483865738}  | {accuracy}   |         0.866666674614 |      0.390509605408 | {0.691666662693024,0.691666662693024,0.633333325386047,0.866666674613953} | {0.490214675664902,0.444783747196198,0.627961099147797,0.390509605407715} |           0.933333337307 |        0.376114845276 | {0.566666662693024,0.566666662693024,0.533333361148834,0.933333337306976} | {0.575542628765106,0.54660427570343,0.785183191299438,0.376114845275879}
+       5 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {38.474328994751,79.9709329605103,117.766183853149,135.803887844086}  | {accuracy}   |         0.841666638851 |      0.576696753502 | {0.616666674613953,0.699999988079071,0.758333325386047,0.841666638851166} | {0.90448260307312,0.750164151191711,0.616493880748749,0.576696753501892}  |           0.899999976158 |        0.631914675236 | {0.666666686534882,0.699999988079071,0.733333349227905,0.899999976158142} | {0.871200919151306,0.780709445476532,0.665971457958221,0.631914675235748}
+      11 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {36.6214678287506,77.7987759113312,115.631717920303,133.83836388588}  | {accuracy}   |         0.758333325386 |      0.881635427475 | {0.308333337306976,0.316666662693024,0.75,0.758333325386047}              | {1.12997460365295,1.02749967575073,0.923768699169159,0.881635427474976}   |           0.766666650772 |        0.878168046474 | {0.433333337306976,0.433333337306976,0.766666650772095,0.766666650772095} | {1.07487094402313,0.974115014076233,0.916269063949585,0.878168046474457}
+       7 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {38.2849600315094,79.7524738311768,117.580325841904,135.606695890427} | {accuracy}   |         0.691666662693 |      0.444524824619 | {0.908333361148834,0.391666680574417,0.691666662693024,0.691666662693024} | {0.335082054138184,2.02327847480774,0.444351017475128,0.444524824619293}  |           0.566666662693 |        0.539750337601 | {0.800000011920929,0.266666680574417,0.566666662693024,0.566666662693024} | {0.433189332485199,2.3276960849762,0.534160375595093,0.539750337600708}
+       6 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {38.6593668460846,80.1789360046387,117.957875013351,135.995815992355} | {accuracy}   |         0.683333337307 |      0.841839790344 | {0.316666662693024,0.366666674613953,0.666666686534882,0.683333337306976} | {1.07646071910858,0.963329672813416,0.87216705083847,0.841839790344238}   |           0.666666686535 |        0.840192914009 | {0.433333337306976,0.533333361148834,0.666666686534882,0.666666686534882} | {1.02845978736877,0.941896677017212,0.861787617206573,0.840192914009094}
+       1 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {37.8553328514099,79.2480089664459,117.139881849289,135.155915975571} | {accuracy}   |         0.358333319426 |       1.11013436317 | {0.358333319425583,0.333333343267441,0.333333343267441,0.358333319425583} | {1.10554325580597,1.11694586277008,1.09756696224213,1.11013436317444}     |           0.233333334327 |         1.17629003525 | {0.233333334326744,0.333333343267441,0.333333343267441,0.233333334326744} | {1.16081762313843,1.14324629306793,1.11625325679779,1.1762900352478}
+      12 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {37.4500079154968,78.6058378219604,116.700626850128,134.72905087471}  | {accuracy}   |         0.308333337307 |       1.06241953373 | {0.150000005960464,0.333333343267441,0.333333343267441,0.308333337306976} | {1.13338851928711,1.09694564342499,1.07030868530273,1.06241953372955}     |           0.433333337307 |         1.03659796715 | {0.16666667163372,0.333333343267441,0.433333337306976,0.433333337306976}  | {1.06262242794037,1.07252764701843,1.05843663215637,1.03659796714783}
 (12 rows)
 </pre>
 
@@ -1287,20 +1294,20 @@
 SELECT * FROM iris_multi_model_info ORDER BY training_metrics_final DESC, training_loss_final;
 </pre>
 <pre class="result">
- mst_key | model_id |                                 compile_params                                  |      fit_params       |  model_type  |  model_size  |                  metrics_elapsed_time                   | metrics_type | training_metrics_final | training_loss_final |                    training_metrics                     |                       training_loss                       | validation_metrics_final | validation_loss_final |                   validation_metrics                    |                     validation_loss
----------+----------+---------------------------------------------------------------------------------+-----------------------+--------------+--------------+---------------------------------------------------------+--------------+------------------------+---------------------+---------------------------------------------------------+-----------------------------------------------------------+--------------------------+-----------------------+---------------------------------------------------------+----------------------------------------------------------
-       3 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.164853096008301,0.264705181121826,0.157214879989624} | {accuracy}   |         0.966666638851 |     0.0860336050391 | {0.966666638851166,0.875,0.966666638851166}             | {0.10731378942728,0.237401679158211,0.0860336050391197}   |                        1 |       0.0690980628133 | {1,0.933333337306976,1}                                 | {0.0825482085347176,0.146834880113602,0.069098062813282}
-       9 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.182960987091064,0.192347049713135,0.185945987701416} | {accuracy}   |         0.941666662693 |       0.16336736083 | {0.966666638851166,0.975000023841858,0.941666662693024} | {0.0924900621175766,0.0709503665566444,0.163367360830307} |           0.933333337307 |        0.196538448334 | {0.933333337306976,1,0.933333337306976}                 | {0.0884167701005936,0.0340272337198257,0.19653844833374}
-      11 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.187994003295898,0.181873798370361,0.188920021057129} | {accuracy}   |         0.899999976158 |      0.329349696636 | {0.908333361148834,0.883333325386047,0.899999976158142} | {0.373963922262192,0.349062085151672,0.3293496966362}     |           0.933333337307 |        0.333393543959 | {0.933333337306976,0.899999976158142,0.933333337306976} | {0.368066221475601,0.367246687412262,0.333393543958664}
-      10 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.184596061706543,0.379925012588501,0.186527013778687} | {accuracy}   |         0.908333361149 |      0.202177524567 | {0.866666674613953,0.966666638851166,0.908333361148834} | {0.250507324934006,0.127185359597206,0.20217752456665}    |           0.866666674614 |        0.268144398928 | {0.800000011920929,1,0.866666674613953}                 | {0.331987112760544,0.11472400277853,0.268144398927689}
-      12 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.187471866607666,0.19880199432373,0.180361032485962}  | {accuracy}   |         0.908333361149 |      0.408491075039 | {0.833333313465118,0.925000011920929,0.908333361148834} | {0.492850959300995,0.451007574796677,0.40849107503891}    |           0.833333313465 |        0.449156552553 | {0.800000011920929,0.866666674613953,0.833333313465118} | {0.543693006038666,0.486827403306961,0.449156552553177}
-       8 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {0.187476873397827,0.190106868743896,0.183273077011108} | {accuracy}   |         0.916666686535 |      0.204672813416 | {0.925000011920929,0.725000023841858,0.916666686534882} | {0.224613606929779,0.507380962371826,0.204672813415527}   |           0.833333313465 |        0.253615111113 | {0.833333313465118,0.666666686534882,0.833333313465118} | {0.261870205402374,0.674266278743744,0.253615111112595}
-       4 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.158944129943848,0.190206050872803,0.162253141403198} | {accuracy}   |                  0.875 |      0.289537638426 | {0.908333361148834,0.949999988079071,0.875}             | {0.177524402737617,0.134268626570702,0.289537638425827}   |           0.800000011921 |        0.416592538357 | {0.933333337306976,0.933333337306976,0.800000011920929} | {0.127392664551735,0.109949067234993,0.416592538356781}
-       6 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.17385196685791,0.167200088500977,0.15508508682251}   | {accuracy}   |         0.850000023842 |      0.874475240707 | {0.683333337306976,0.683333337306976,0.850000023841858} | {0.942066073417664,0.909405112266541,0.874475240707397}   |           0.800000011921 |        0.901099026203 | {0.600000023841858,0.600000023841858,0.800000011920929} | {0.978309333324432,0.946642935276031,0.901099026203156}
-       5 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.175297975540161,0.166980028152466,0.157662153244019} | {accuracy}   |         0.791666686535 |      0.473440766335 | {0.691666662693024,0.758333325386047,0.791666686534882} | {0.555641710758209,0.510370552539825,0.473440766334534}   |           0.699999988079 |        0.532045960426 | {0.600000023841858,0.666666686534882,0.699999988079071} | {0.637348413467407,0.572735965251923,0.532045960426331}
-       7 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {0.370399951934814,0.185209989547729,0.184471845626831} | {accuracy}   |         0.683333337307 |      0.467345923185 | {0.683333337306976,0.683333337306976,0.683333337306976} | {0.476419776678085,0.466614902019501,0.467345923185349}   |           0.600000023842 |        0.441450744867 | {0.600000023841858,0.600000023841858,0.600000023841858} | {0.495125651359558,0.452383369207382,0.441450744867325}
-       1 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {0.161419153213501,0.168106079101562,0.156718969345093} | {accuracy}   |         0.358333319426 |       1.11533606052 | {0.358333319425583,0.358333319425583,0.358333319425583} | {1.1051013469696,1.11064600944519,1.11533606052399}       |           0.233333334327 |         1.15986251831 | {0.233333334326744,0.233333334326744,0.233333334326744} | {1.14333879947662,1.16091287136078,1.15986251831055}
-       2 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {0.1615891456604,0.159286975860596,0.165864944458008}   | {accuracy}   |         0.358333319426 |       1.11393201351 | {0.358333319425583,0.358333319425583,0.358333319425583} | {1.11442768573761,1.10136640071869,1.11393201351166}      |           0.233333334327 |         1.17370998859 | {0.233333334326744,0.233333334326744,0.233333334326744} | {1.13597917556763,1.13979840278625,1.17370998859406}
+ mst_key | model_id |                                 compile_params                                  |      fit_params       |  model_type  |  model_size  |                 metrics_elapsed_time                 | metrics_type | training_metrics_final | training_loss_final |                    training_metrics                     |                      training_loss                       | validation_metrics_final | validation_loss_final |                   validation_metrics                    |                     validation_loss
+---------+----------+---------------------------------------------------------------------------------+-----------------------+--------------+--------------+------------------------------------------------------+--------------+------------------------+---------------------+---------------------------------------------------------+----------------------------------------------------------+--------------------------+-----------------------+---------------------------------------------------------+---------------------------------------------------------
+       5 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {19.451014995575,37.2563629150391,54.7182998657227}  | {accuracy}   |         0.975000023842 |      0.490673750639 | {0.958333313465118,0.691666662693024,0.975000023841858} | {0.541427075862885,0.517450392246246,0.490673750638962}  |           0.933333337307 |        0.557333409786 | {0.933333337306976,0.666666686534882,0.933333337306976} | {0.60710871219635,0.570206344127655,0.557333409786224}
+       9 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 0.7900390625 | {18.2973220348358,36.3793680667877,54.0178129673004} | {accuracy}   |         0.966666638851 |     0.0894369781017 | {0.966666638851166,0.966666638851166,0.966666638851166} | {0.133233144879341,0.111788973212242,0.0894369781017303} |           0.899999976158 |        0.195293620229 | {0.933333337306976,0.966666638851166,0.899999976158142} | {0.156044512987137,0.132803827524185,0.195293620228767}
+       4 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {17.6080539226532,35.6788699626923,53.3836889266968} | {accuracy}   |         0.966666638851 |      0.147051945329 | {0.908333361148834,0.958333313465118,0.966666638851166} | {0.225205257534981,0.168186634778976,0.147051945328712}  |           0.866666674614 |        0.250319689512 | {0.899999976158142,0.933333337306976,0.866666674613953} | {0.23467344045639,0.182851999998093,0.250319689512253}
+       8 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {18.7529940605164,36.8255958557129,54.3704080581665} | {accuracy}   |         0.966666638851 |      0.244641214609 | {0.691666662693024,0.891666650772095,0.966666638851166} | {0.939713299274445,0.462556451559067,0.244641214609146}  |           0.966666638851 |        0.298279434443 | {0.566666662693024,0.966666638851166,0.966666638851166} | {1.30671143531799,0.412235885858536,0.29827943444252}
+      10 |        2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {17.4004180431366,35.4556438922882,53.1877279281616} | {accuracy}   |         0.958333313465 |      0.123381219804 | {0.949999988079071,0.766666650772095,0.958333313465118} | {0.0919980704784393,0.576169073581696,0.123381219804287} |           0.933333337307 |        0.203262642026 | {0.866666674613953,0.766666650772095,0.933333337306976} | {0.199721112847328,0.959742486476898,0.203262642025948}
+       3 |        1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {17.81547498703,35.8978669643402,53.5737180709839}   | {accuracy}   |         0.933333337307 |      0.150664463639 | {0.941666662693024,0.925000011920929,0.933333337306976} | {0.117781177163124,0.163000836968422,0.150664463639259}  |           0.833333313465 |        0.365329563618 | {0.866666674613953,0.833333313465118,0.833333313465118} | {0.249404579401016,0.375173389911652,0.365329563617706}
+       6 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {19.686233997345,37.4543249607086,54.8708770275116}  | {accuracy}   |         0.858333349228 |      0.743227303028 | {0.675000011920929,0.708333313465118,0.858333349227905} | {0.808507084846497,0.774080872535706,0.743227303028107}  |           0.966666638851 |        0.770158529282 | {0.666666686534882,0.666666686534882,0.966666638851166} | {0.808504283428192,0.791898012161255,0.770158529281616}
+      11 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {17.1583528518677,35.0312390327454,52.96133685112}   | {accuracy}   |         0.816666662693 |      0.739802956581 | {0.774999976158142,0.816666662693024,0.816666662693024} | {0.83727890253067,0.792884111404419,0.739802956581116}   |           0.800000011921 |        0.758302807808 | {0.766666650772095,0.800000011920929,0.800000011920929} | {0.837629973888397,0.801746726036072,0.758302807807922}
+       2 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=8,epochs=1 | madlib_keras | 1.2197265625 | {16.9146749973297,34.794900894165,52.7328250408173}  | {accuracy}   |         0.808333337307 |      0.303489625454 | {0.683333337306976,0.966666638851166,0.808333337306976} | {1.05107569694519,0.189959138631821,0.303489625453949}   |           0.866666674614 |        0.285375326872 | {0.666666686534882,0.966666638851166,0.866666674613953} | {1.01942157745361,0.238933652639389,0.285375326871872}
+      12 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1 | madlib_keras | 0.7900390625 | {18.0590150356293,36.1394078731537,53.7930529117584} | {accuracy}   |         0.699999988079 |       1.02253305912 | {0.550000011920929,0.691666662693024,0.699999988079071} | {1.0493084192276,1.03803598880768,1.02253305912018}      |           0.666666686535 |         1.02013540268 | {0.633333325386047,0.600000023841858,0.666666686534882} | {1.03952574729919,1.03439521789551,1.02013540267944}
+       7 |        2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {19.2141709327698,37.0566499233246,54.5629329681396} | {accuracy}   |         0.691666662693 |      0.448221176863 | {0.691666662693024,0.691666662693024,0.691666662693024} | {0.447027385234833,0.444605946540833,0.448221176862717}  |           0.566666662693 |        0.555035352707 | {0.566666662693024,0.566666662693024,0.566666662693024} | {0.551217257976532,0.540408432483673,0.555035352706909}
+       1 |        1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']   | batch_size=4,epochs=1 | madlib_keras | 1.2197265625 | {18.501914024353,36.5938439369202,54.194118976593}   | {accuracy}   |         0.358333319426 |       1.09730923176 | {0.333333343267441,0.333333343267441,0.358333319425583} | {1.09999334812164,1.10405397415161,1.09730923175812}     |           0.233333334327 |         1.12532019615 | {0.333333343267441,0.333333343267441,0.233333334326744} | {1.12446486949921,1.13782525062561,1.12532019615173}
 (12 rows)
 </pre>
 Note that the loss and accuracy values pick up from where the previous run left off.
@@ -1322,6 +1329,10 @@
 
 4. Here are some more details on how warm start works.  These details are mostly applicable when implementing autoML algorithms on top of MADlib's model selection.  In short, the 'model_selection_table' dictates which models get trained and output to the 'model_output_table' and associated summary and info tables.  When 'warm_start' is TRUE, models are built for each 'mst_key' in the 'model_selection_table'.  If there are prior runs for an 'mst_key' then the weights from that run will be used.  If there are no prior runs for an 'mst_key' then random initialization will be used.  For example, let's say we start with 'mst_keys' of 1, 2, 3, and 4 in the 'model_selection_table'. We run fit once to get model and info tables for 1, 2, 3, and 4.  Then we modify the 'model_selection_table' as part of an autoML scheme, in which we remove the 'mst_key' for 1 and add a new 'mst_key' for 5.  Next we run fit with warm start.  The result will be models created for 'mst_keys' of 2, 3, 4, and 5.  Warm start will be used for 2, 3, and 4 (using prior run) and random initialization will be used for 5 (no prior run).  The 'mst_key' of 1 will be dropped.
 
+5. The 'num_iterations' parameter and the Keras fit parameter 'epochs' can substantially affect accuracy and run-time.
+In general, packing more of the training work into each iteration by increasing 'epochs', and using fewer
+iterations for the same total number of passes over the data, will speed up training but may result in
+lower accuracy.  It's best to do some experimentation to find out what works for your models and dataset.
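+For example, 'num_iterations=5' with 'epochs=2' in the fit params makes the same 10 total passes over the data
+as 'num_iterations=10' with 'epochs=1', but with fewer hops of model state between segments, so it will
+generally run faster.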
+
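+As an illustration of the warm start workflow described in item 4 above, here is a minimal sketch.  It assumes
+a model selection table called 'mst_table' containing 'mst_key' values 1 through 4, plus the 'iris_train_packed'
+and 'iris_multi_model' tables from the examples above; the parameter values are illustrative only:
+<pre class="example">
+-- Remove one configuration and add a new one as part of an autoML scheme.
+DELETE FROM mst_table WHERE mst_key = 1;
+INSERT INTO mst_table (mst_key, model_id, compile_params, fit_params)
+VALUES (5, 2,
+        $$loss='categorical_crossentropy', optimizer='Adam(lr=0.01)', metrics=['accuracy']$$,
+        $$batch_size=4,epochs=1$$);
+-- Run fit with warm_start=TRUE.  Weights are reused for mst_keys 2, 3 and 4;
+-- mst_key 5 is randomly initialized and mst_key 1 is dropped.
+SELECT madlib.madlib_keras_fit_multiple_model('iris_train_packed',  -- source table
+                                              'iris_multi_model',   -- model output table
+                                              'mst_table',          -- model selection table
+                                              3,                    -- num_iterations
+                                              FALSE,                -- use gpus
+                                              NULL,                 -- validation table
+                                              1,                    -- metrics compute frequency
+                                              TRUE                  -- warm start
+                                             );
+</pre>
+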
 @anchor background
 @par Technical Background
 
@@ -1334,12 +1345,12 @@
 model averaging [2].
 
 On the effect of database cluster size: as the database cluster size increases,
-it will be proportionally faster to train a set of models, as long as you have at
+it will be faster to train a set of models, as long as you have at
 least as many model selection tuples as segments. This is because model state is "hopped" from
 segment to segment and training takes place in parallel [1,2]. If you have fewer model
 selection tuples to train than segments, then some
-segments may not be busy 100% of the time so speedup will not necessarily be linear with
-database cluster size. Inference (predict) is an embarrassingly parallel operation so
+segments may not be busy 100% of the time, so speedup will not necessarily increase
+on a larger cluster. Inference (predict) is an embarrassingly parallel operation so
 inference runtimes will be proportionally faster as the number of segments increases.
 
 @anchor literature
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.sql_in
index 8697ec4..c15757c 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.sql_in
@@ -39,6 +39,7 @@
 <div class="toc"><b>Contents</b><ul>
 <li class="level1"><a href="#load_mst_table">Load Model Selection Table</a></li>
 <li class="level1"><a href="#example">Examples</a></li>
+<li class="level1"><a href="#notes">Notes</a></li>
 <li class="level1"><a href="#related">Related Topics</a></li>
 </ul></div>
 
@@ -404,6 +405,15 @@
 (1 row)
 </pre>
 
+@anchor notes
+@par Notes
+
+1. In this method, the same set of compile and fit parameters is applied to all model architectures
+when generating combinations.  However, you may wish to use different compile and fit parameters
+for each model.  To do so, call 'load_model_selection_table'
+multiple times - once for each model.  Then you can combine the resulting tables using UNION or other means,
+as sketched in the example below.  Note that 'mst_key' values must be unique, so you will need to regenerate
+them in your final combined table.
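+
+Here is a minimal sketch of that approach.  It assumes a model architecture table called 'model_arch_library'
+containing model ids 1 and 2; the table names and parameter values are illustrative only:
+<pre class="example">
+-- Load a separate selection table per model architecture, each with its own params.
+DROP TABLE IF EXISTS mst_table_1, mst_table_1_summary;
+SELECT madlib.load_model_selection_table('model_arch_library', 'mst_table_1',
+    ARRAY[1],
+    ARRAY[$$loss='categorical_crossentropy', optimizer='Adam(lr=0.01)', metrics=['accuracy']$$],
+    ARRAY[$$batch_size=4,epochs=1$$]);
+DROP TABLE IF EXISTS mst_table_2, mst_table_2_summary;
+SELECT madlib.load_model_selection_table('model_arch_library', 'mst_table_2',
+    ARRAY[2],
+    ARRAY[$$loss='categorical_crossentropy', optimizer='Adam(lr=0.001)', metrics=['accuracy']$$],
+    ARRAY[$$batch_size=8,epochs=1$$]);
+-- Combine the tables and regenerate a unique mst_key.
+DROP TABLE IF EXISTS mst_table_combined;
+CREATE TABLE mst_table_combined AS
+    SELECT (row_number() OVER ())::INTEGER AS mst_key, model_id, compile_params, fit_params
+    FROM (SELECT model_id, compile_params, fit_params FROM mst_table_1
+          UNION ALL
+          SELECT model_id, compile_params, fit_params FROM mst_table_2) combined;
+</pre>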
+
 @anchor related
 @par Related Topics