<!-- HTML header for doxygen 1.8.4-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.13"/>
<meta name="keywords" content="madlib,postgres,greenplum,machine learning,data mining,deep learning,ensemble methods,data science,market basket analysis,affinity analysis,pca,lda,regression,elastic net,huber white,proportional hazards,k-means,latent dirichlet allocation,bayes,support vector machines,svm"/>
<title>MADlib: Setup Model Selection</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<script type="text/javascript">
$(document).ready(initResizable);
</script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/javascript">
$(document).ready(function() { init_search(); });
</script>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js", "TeX/AMSmath.js", "TeX/AMSsymbols.js"],
jax: ["input/TeX","output/HTML-CSS"],
});
</script><script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<!-- hack in the navigation tree -->
<script type="text/javascript" src="eigen_navtree_hacks.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="madlib_extra.css" rel="stylesheet" type="text/css"/>
<!-- google analytics -->
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-45382226-1', 'madlib.apache.org');
ga('send', 'pageview');
</script>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td id="projectlogo"><a href="http://madlib.apache.org"><img alt="Logo" src="madlib.png" height="50" style="padding-left:0.5em;" border="0"/ ></a></td>
<td style="padding-left: 0.5em;">
<div id="projectname">
<span id="projectnumber">1.17.0</span>
</div>
<div id="projectbrief">User Documentation for Apache MADlib</div>
</td>
<td> <div id="MSearchBox" class="MSearchBoxInactive">
<span class="left">
<img id="MSearchSelect" src="search/mag_sel.png"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
alt=""/>
<input type="text" id="MSearchField" value="Search" accesskey="S"
onfocus="searchBox.OnSearchFieldFocus(true)"
onblur="searchBox.OnSearchFieldFocus(false)"
onkeyup="searchBox.OnSearchFieldChange(event)"/>
</span><span class="right">
<a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
</span>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.13 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
<div id="nav-tree">
<div id="nav-tree-contents">
<div id="nav-sync" class="sync"></div>
</div>
</div>
<div id="splitbar" style="-moz-user-select:none;"
class="ui-resizable-handle">
</div>
</div>
<script type="text/javascript">
$(document).ready(function(){initNavTree('group__grp__keras__setup__model__selection.html','');});
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="headertitle">
<div class="title">Setup Model Selection<div class="ingroups"><a class="el" href="group__grp__early__stage.html">Early Stage Development</a> &raquo; <a class="el" href="group__grp__dl.html">Deep Learning</a> &raquo; <a class="el" href="group__grp__model__selection.html">Model Selection for DL</a></div></div> </div>
</div><!--header-->
<div class="contents">
<dl class="section warning"><dt>Warning</dt><dd><em> This MADlib method is still in early stage development. Interface and implementation are subject to change. </em></dd></dl>
<div class="toc"><b>Contents</b><ul>
<li class="level1">
<a href="#load_mst_table">Load Model Selection Table</a> </li>
<li class="level1">
<a href="#example">Examples</a> </li>
<li class="level1">
<a href="#notes">Notes</a> </li>
<li class="level1">
<a href="#related">Related Topics</a> </li>
</ul>
</div><p>This utility function sets up a model selection table for use by the multiple-model Keras fit feature of MADlib. By model selection we mean both hyperparameter tuning and model architecture search. The table defines the unique combinations of model architectures, compile parameters, and fit parameters (i.e., the Cartesian product of the three input lists) to run on a massively parallel processing database cluster.</p>
<p><a class="anchor" id="load_mst_table"></a></p><dl class="section user"><dt>Load Model Selection Table</dt><dd></dd></dl>
<pre class="syntax">
load_model_selection_table(
model_arch_table,
model_selection_table,
model_id_list,
compile_params_list,
fit_params_list
)
</pre><p><b>Arguments</b> </p><dl class="arglist">
<dt>model_arch_table </dt>
<dd><p class="startdd">VARCHAR. Table containing model architectures and weights. For more information on this table refer to <a href="group__grp__keras__model__arch.html">Load Model</a>. </p>
<p class="enddd"></p>
</dd>
<dt>model_selection_table </dt>
<dd><p class="startdd">VARCHAR. Model selection table created by this utility. A summary table named &lt;model_selection_table&gt;_summary is also created. Contents of both output tables are described below. </p>
<p class="enddd"></p>
</dd>
<dt>model_id_list </dt>
<dd><p class="startdd">INTEGER[]. Array of model IDs from the 'model_arch_table' to be included in the run combinations. For hyperparameter search, this will typically be one model ID. For model architecture search, this will be the different model IDs that you want to test. </p>
<p class="enddd"></p>
</dd>
<dt>compile_params_list </dt>
<dd><p class="startdd">VARCHAR[]. Array of compile parameters to be tested. Each element of the array should consist of a string of compile parameters exactly as it is to be passed to Keras. </p>
<p class="enddd"></p>
</dd>
<dt>fit_params_list </dt>
<dd><p class="startdd">VARCHAR[]. Array of fit parameters to be tested. Each element of the array should consist of a string of fit parameters exactly as it is to be passed to Keras. </p>
<p class="enddd"></p>
</dd>
</dl>
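<p>As an illustration of how the arguments fit together, here is a minimal sketch of a call. It assumes a model architecture table named 'model_arch_library' that already contains a model with ID 1; the table name and ID are taken from the examples below, not requirements: </p><pre class="example">
SELECT madlib.load_model_selection_table(
    'model_arch_library',       -- model_arch_table
    'mst_table',                -- model_selection_table (output)
    ARRAY[1],                   -- model_id_list
    ARRAY[                      -- compile_params_list
        $$loss='categorical_crossentropy',optimizer='Adam(lr=0.01)',metrics=['accuracy']$$
    ],
    ARRAY[                      -- fit_params_list
        $$batch_size=4,epochs=1$$
    ]
);
</pre><p>The output table contains one row per combination, i.e. the number of model IDs times the number of compile parameter strings times the number of fit parameter strings. </p>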
<p><b>Output table</b> <br />
The model selection output table contains the following columns: </p><table class="output">
<tr>
<th>mst_key </th><td>INTEGER. ID that uniquely identifies a tuple of (model architecture, compile parameters, fit parameters). </td></tr>
<tr>
<th>model_id </th><td>INTEGER. Model architecture ID from the 'model_arch_table'. </td></tr>
<tr>
<th>compile_params </th><td>VARCHAR. Keras compile parameters. </td></tr>
<tr>
<th>fit_params </th><td>VARCHAR. Keras fit parameters. </td></tr>
</table>
<p>A summary table named &lt;model_selection_table&gt;_summary is also created, which contains the following column: </p><table class="output">
<tr>
<th>model_arch_table </th><td>VARCHAR. Name of the model architecture table containing the model architecture IDs. </td></tr>
</table>
<p><a class="anchor" id="example"></a></p><dl class="section user"><dt>Examples</dt><dd><ol type="1">
<li>The model selection table works in conjunction with a model architecture table, so we first create a model architecture table with two different models. Use Keras to define a model architecture with 1 hidden layer: <pre class="example">
import keras
from keras.models import Sequential
from keras.layers import Dense
model1 = Sequential()
model1.add(Dense(10, activation='relu', input_shape=(4,)))
model1.add(Dense(10, activation='relu'))
model1.add(Dense(3, activation='softmax'))
model1.summary()
<pre class="fragment">_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_1 (Dense) (None, 10) 50
_________________________________________________________________
dense_2 (Dense) (None, 10) 110
_________________________________________________________________
dense_3 (Dense) (None, 3) 33
=================================================================
Total params: 193
Trainable params: 193
Non-trainable params: 0
</pre>
</pre> Export the model to JSON: <pre class="example">
model1.to_json()
</pre> <pre class="result">
'{"class_name": "Sequential", "keras_version": "2.1.6", "config": [{"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_1", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "batch_input_shape": [null, 4], "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_2", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_3", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "softmax", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 3, "use_bias": true, "activity_regularizer": null}}], "backend": "tensorflow"}'
</pre> Now use Keras to define a model architecture with 2 hidden layers: <pre class="example">
model2 = Sequential()
model2.add(Dense(10, activation='relu', input_shape=(4,)))
model2.add(Dense(10, activation='relu'))
model2.add(Dense(10, activation='relu'))
model2.add(Dense(3, activation='softmax'))
model2.summary()
<pre class="fragment">_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_4 (Dense) (None, 10) 50
_________________________________________________________________
dense_5 (Dense) (None, 10) 110
_________________________________________________________________
dense_6 (Dense) (None, 10) 110
_________________________________________________________________
dense_7 (Dense) (None, 3) 33
=================================================================
Total params: 303
Trainable params: 303
Non-trainable params: 0
</pre>
</pre> Export the model to JSON: <pre class="example">
model2.to_json()
</pre> <pre class="result">
'{"class_name": "Sequential", "keras_version": "2.1.6", "config": [{"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_4", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "batch_input_shape": [null, 4], "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_5", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_6", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_7", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "softmax", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 3, "use_bias": true, "activity_regularizer": null}}], "backend": "tensorflow"}'
</pre></li>
<li>Load both models into the architecture table: <pre class="example">
DROP TABLE IF EXISTS model_arch_library;
SELECT madlib.load_keras_model('model_arch_library', -- Output table
$$
{"class_name": "Sequential", "keras_version": "2.1.6", "config": [{"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_1", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "batch_input_shape": [null, 4], "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_2", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_3", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "softmax", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 3, "use_bias": true, "activity_regularizer": null}}], "backend": "tensorflow"}
$$
::json, -- JSON blob
NULL, -- Weights
'Sophie', -- Name
'MLP with 1 hidden layer' -- Descr
);
SELECT madlib.load_keras_model('model_arch_library', -- Output table
$$
{"class_name": "Sequential", "keras_version": "2.1.6", "config": [{"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_4", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "batch_input_shape": [null, 4], "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_5", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_6", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "relu", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 10, "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_7", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "softmax", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 3, "use_bias": true, "activity_regularizer": null}}], "backend": "tensorflow"}
$$
::json, -- JSON blob
NULL, -- Weights
'Maria', -- Name
'MLP with 2 hidden layers' -- Descr
);
SELECT model_id, name, description FROM model_arch_library ORDER BY model_id;
</pre> <pre class="result">
model_id | name | description
----------+--------+--------------------------
1 | Sophie | MLP with 1 hidden layer
2 | Maria | MLP with 2 hidden layers
(2 rows)
</pre></li>
<li>Load model selection table. Select the model(s) from the model architecture table that you want to run, along with the compile and fit parameters. Unique combinations will be created: <pre class="example">
DROP TABLE IF EXISTS mst_table, mst_table_summary;
SELECT madlib.load_model_selection_table('model_arch_library', -- model architecture table
'mst_table', -- model selection table output
ARRAY[1,2], -- model ids from model architecture table
ARRAY[ -- compile params
$$loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']$$,
$$loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy']$$,
$$loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy']$$
],
ARRAY[ -- fit params
$$batch_size=4,epochs=1$$,
$$batch_size=8,epochs=1$$
]
);
SELECT * FROM mst_table ORDER BY mst_key;
</pre> <pre class="result">
mst_key | model_id | compile_params | fit_params
---------+---------------+---------------------------------------------------------------------------------+-----------------------
1 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=4,epochs=1
2 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=8,epochs=1
3 | 1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1
4 | 1 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1
5 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1
6 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1
7 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=4,epochs=1
8 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=8,epochs=1
9 | 2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1
10 | 2 | loss='categorical_crossentropy', optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1
11 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1
12 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1
(12 rows)
</pre> The name of the model architecture table is stored in the summary table: <pre class="example">
SELECT * FROM mst_table_summary;
</pre> <pre class="result">
model_arch_table
--------------------+
model_arch_library
(1 row)
</pre></li>
<li>Create model selection table manually. If you would like to have more control over the set of model selection parameters to run, you can manually create the model selection table and the associated summary table. Both must be created since they are needed by the multiple model fit module. For example, let's say we don't want all combinations but only want batch_size=4 for model_id=1 and batch_size=8 for model_id=2: <pre class="example">
DROP TABLE IF EXISTS mst_table_manual;
CREATE TABLE mst_table_manual(
mst_key serial,
model_id integer,
compile_params varchar,
fit_params varchar
);
INSERT INTO mst_table_manual(model_id, compile_params, fit_params) VALUES
(1, $$loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']$$, 'batch_size=4,epochs=1'),
(1, $$loss='categorical_crossentropy',optimizer='Adam(lr=0.01)',metrics=['accuracy']$$, 'batch_size=4,epochs=1'),
(1, $$loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy']$$, 'batch_size=4,epochs=1'),
(2, $$loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy']$$, 'batch_size=8,epochs=1'),
(2, $$loss='categorical_crossentropy',optimizer='Adam(lr=0.01)',metrics=['accuracy']$$, 'batch_size=8,epochs=1'),
(2, $$loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy']$$, 'batch_size=8,epochs=1');
SELECT * FROM mst_table_manual ORDER BY mst_key;
</pre> <pre class="result">
mst_key | model_id | compile_params | fit_params
---------+---------------+---------------------------------------------------------------------------------+-----------------------
1 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=4,epochs=1
2 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=4,epochs=1
3 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1
4 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=8,epochs=1
5 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.01)',metrics=['accuracy'] | batch_size=8,epochs=1
6 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1
(6 rows)
</pre> Create the summary table, which must have the same name as the model selection output table with "_summary" appended: <pre class="example">
DROP TABLE IF EXISTS mst_table_manual_summary;
CREATE TABLE mst_table_manual_summary (
model_arch_table varchar
);
INSERT INTO mst_table_manual_summary(model_arch_table) VALUES
('model_arch_library');
SELECT * FROM mst_table_manual_summary;
</pre> <pre class="result">
model_arch_table
--------------------+
model_arch_library
(1 row)
</pre></li>
<li>Generate hyperparameters automatically. You can use other libraries or methods to generate hyperparameters according to the tests that you want to run. For example, let's randomly select batch sizes from powers of 2 and learning rates on a log scale. We use psycopg2, a PostgreSQL database adapter for the Python programming language. <pre class="example">
import numpy as np
import psycopg2 as p2
conn = p2.connect('postgresql://gpadmin@35.239.240.26:5432/madlib')
#conn = p2.connect('postgresql://fmcquillan:5432/madlib')
cur = conn.cursor()
#
cur.execute("DROP TABLE IF EXISTS mst_table_auto, mst_table_auto_summary;")
conn.commit()
#
#compile params
learning_rate = np.random.permutation([0.1,0.01,0.001,0.0001])[:3]
compile_param1 = "loss='categorical_crossentropy',optimizer='Adam(lr=" + str(learning_rate[0]) + ")',metrics=['accuracy']"
compile_param2 = "loss='categorical_crossentropy',optimizer='Adam(lr=" + str(learning_rate[1]) + ")',metrics=['accuracy']"
compile_param3 = "loss='categorical_crossentropy',optimizer='Adam(lr=" + str(learning_rate[2]) + ")',metrics=['accuracy']"
compile_params = [compile_param1,compile_param2,compile_param3]
#
#fit params
batch_size = np.random.permutation([4,8,16,32,64])[:2]
fit_param1 = "batch_size=" + str(batch_size[0]) + ",epochs=1"
fit_param2 = "batch_size=" + str(batch_size[1]) + ",epochs=1"
fit_params = [fit_param1,fit_param2]
#
query = "SELECT madlib.load_model_selection_table('model_arch_library', 'mst_table_auto', ARRAY[1,2], %s, %s);"
#
cur.execute(query,[compile_params, fit_params])
conn.commit()
#
# review model selection table
cur.execute("SELECT * FROM mst_table_auto ORDER BY mst_key;")
cur.fetchall()
</pre> <pre class="result">
mst_key | model_id | compile_params | fit_params
---------+---------------+----------------------------------------------------------------------------------+-----------------------
1 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.0001)',metrics=['accuracy'] | batch_size=4,epochs=1
2 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.0001)',metrics=['accuracy'] | batch_size=8,epochs=1
3 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=4,epochs=1
4 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=8,epochs=1
5 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1
6 | 1 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1
7 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.0001)',metrics=['accuracy'] | batch_size=4,epochs=1
8 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.0001)',metrics=['accuracy'] | batch_size=8,epochs=1
9 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=4,epochs=1
10 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.1)',metrics=['accuracy'] | batch_size=8,epochs=1
11 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=4,epochs=1
12 | 2 | loss='categorical_crossentropy',optimizer='Adam(lr=0.001)',metrics=['accuracy'] | batch_size=8,epochs=1
(12 rows)
</pre> The name of the model architecture table is stored in the summary table: <pre class="example">
SELECT * FROM mst_table_auto_summary;
</pre> <pre class="result">
model_arch_table
--------------------+
model_arch_library
(1 row)
</pre></li>
</ol>
</dd></dl>
<p><a class="anchor" id="notes"></a></p><dl class="section user"><dt>Notes</dt><dd></dd></dl>
<ol type="1">
<li>In this method, the same compile and fit parameters are applied to all model architectures when generating combinations. However, you may wish to have different compile and fit parameters for each model. To do so, call 'load_model_selection_table' multiple times, once for each model, then combine the resulting tables using UNION or other means (see the sketch after this list). Note that the 'mst_key' must be unique, so you will need to regenerate it in your final combined table.</li>
</ol>
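<p>As a sketch of the combining step mentioned in the note above, suppose two hypothetical tables 'mst_table_model1' and 'mst_table_model2' were each created by a separate call to 'load_model_selection_table' against the same 'model_arch_library'. One way to merge them while regenerating 'mst_key' (the table names and the row_number() approach are illustrative only): </p><pre class="example">
DROP TABLE IF EXISTS mst_table_combined, mst_table_combined_summary;
CREATE TABLE mst_table_combined AS
    SELECT (row_number() OVER ())::integer AS mst_key,   -- regenerate unique keys
           model_id,
           compile_params,
           fit_params
    FROM (
        SELECT model_id, compile_params, fit_params FROM mst_table_model1
        UNION ALL
        SELECT model_id, compile_params, fit_params FROM mst_table_model2
    ) combined;
-- The associated summary table must also be created by hand,
-- as in the manual example above:
CREATE TABLE mst_table_combined_summary (
    model_arch_table varchar
);
INSERT INTO mst_table_combined_summary(model_arch_table) VALUES
    ('model_arch_library');
</pre>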
<p><a class="anchor" id="related"></a></p><dl class="section user"><dt>Related Topics</dt><dd></dd></dl>
<p>See <a class="el" href="keras__model__arch__table_8sql__in.html">keras_model_arch_table.sql_in</a> </p>
</div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li class="footer">Generated on Mon Apr 6 2020 21:46:59 for MADlib by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.13 </li>
</ul>
</div>
</body>
</html>