/* ----------------------------------------------------------------------- *//**
*
* @file lda.sql_in
*
* @brief SQL functions for Latent Dirichlet Allocation
* @date Dec 2012
*
* @sa For an introduction to Latent Dirichlet Allocation models, see the
module description \ref grp_lda.
*
*//* ------------------------------------------------------------------------*/
m4_include(`SQLCommon.m4')
/**
@addtogroup grp_lda
<div class="toc"><b>Contents</b>
<ul>
<li><a href="#vocabulary">Vocabulary Format</a></li>
<li><a href="#train">Training Function</a></li>
<li><a href="#predict">Prediction Function</a></li>
<li><a href="#examples">Examples</a></li>
<li><a href="#notes">Notes</a></li>
<li><a href="#literature">Literature</a></li>
<li><a href="#related">Related Topics</a><li>
</ul>
</div>
@brief Generates a Latent Dirichlet Allocation predictive model for a collection of documents.
@warning Due to a change in the internal structure of the LDA model in v1.8, model tables
computed by MADlib versions prior to v1.8 cannot be used with the v1.8 prediction function.
<hr>
Latent Dirichlet Allocation (LDA) is a generative probabilistic
model for natural-language text that has received a lot of attention in recent
years. The model is quite versatile, having found uses in problems like automated
topic discovery, collaborative filtering, and document classification.
The LDA model posits that each document is associated with a mixture of various
topics (e.g. a document is related to Topic 1 with probability 0.7, and Topic 2
with probability 0.3), and that each word in the document is attributable to
one of the document's topics. There is a (symmetric) Dirichlet prior with
parameter \f$ \alpha \f$ on each document's topic mixture. In addition, there
is another (symmetric) Dirichlet prior with parameter \f$ \beta \f$ on the
distribution of words for each topic.
The following generative process then defines a distribution over a corpus of
documents.
- For each topic \f$ i \f$, sample a per-topic word
distribution \f$ \phi_i \f$ from the Dirichlet(\f$\beta\f$) prior.
- For each document:
- Sample a document length N from a suitable distribution, say, Poisson.
- Sample a topic mixture \f$ \theta \f$ for the document from the
Dirichlet(\f$\alpha\f$) distribution.
- For each of the N words:
- Sample a topic \f$ z_n \f$ from the multinomial topic distribution \f$
\theta \f$.
- Sample a word \f$ w_n \f$ from the multinomial word distribution \f$
\phi_{z_n} \f$ associated with topic \f$ z_n \f$.
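Putting the steps above together, the joint distribution over the per-topic word
distributions \f$ \phi \f$, the per-document topic mixtures \f$ \theta \f$, the
topic assignments \f$ z \f$, and the observed words \f$ w \f$ takes the standard
textbook form (shown here for reference, not anything specific to this
implementation):
\f[
p(w, z, \theta, \phi \mid \alpha, \beta)
= \prod_i p(\phi_i \mid \beta)
  \prod_d \Big( p(\theta_d \mid \alpha)
  \prod_{n=1}^{N_d} p(z_{d,n} \mid \theta_d) \, p(w_{d,n} \mid \phi_{z_{d,n}}) \Big)
\f]
where \f$ N_d \f$ is the length of document \f$ d \f$.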
In practice, only the words in each document are observable. The topic mixture
of each document and the topic of each word are latent variables that must be
inferred from the observed words; this inference task is what is meant by the
inference problem for LDA. Exact inference is intractable, but several
approximate inference algorithms for LDA have been developed. The simple and
effective Gibbs sampling algorithm described in Griffiths and Steyvers [2]
appears to be the current algorithm of choice.
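For reference, each iteration of that collapsed Gibbs sampler resamples the
topic assignment \f$ z_i \f$ of every word occurrence \f$ w_i \f$ in document
\f$ d \f$ according to the conditional distribution derived in [2]:
\f[
P(z_i = k \mid z_{-i}, w) \;\propto\;
\frac{n^{(w_i)}_{-i,k} + \beta}{n^{(\cdot)}_{-i,k} + V\beta}
\left( n^{(d)}_{-i,k} + \alpha \right)
\f]
where \f$ n^{(w_i)}_{-i,k} \f$ is the number of times word \f$ w_i \f$ is
assigned to topic \f$ k \f$, \f$ n^{(\cdot)}_{-i,k} \f$ is the total number of
words assigned to topic \f$ k \f$, \f$ n^{(d)}_{-i,k} \f$ is the number of
words in document \f$ d \f$ assigned to topic \f$ k \f$ (all counts excluding
the current assignment \f$ z_i \f$), and \f$ V \f$ is the vocabulary size.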
This implementation provides a parallel and scalable in-database solution for
LDA based on Gibbs sampling. Unlike implementations based on MPI or
Hadoop Map/Reduce, it builds on shared-nothing MPP databases and enables
high-performance in-database analytics.
@anchor vocabulary
@par Vocabulary Format
The vocabulary, or dictionary, indexes all the words found in the corpus and has the following format:
<pre>{TABLE|VIEW} <em>vocab_table</em> (
<em>wordid</em> INTEGER,
<em>word</em> TEXT
)</pre>
where \c wordid refers to the word ID (the index of a word in the vocabulary) and
\c word is the actual word.
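For illustration, a tiny vocabulary table could be created as follows (the
table name and words here are hypothetical):
<pre class="example">
CREATE TABLE my_vocab(wordid INT4, word TEXT);
INSERT INTO my_vocab VALUES
(0, 'code'), (1, 'data'), (2, 'graph'), (3, 'model'), (4, 'text');
</pre>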
@usage
- The training (i.e. topic inference) can be done with the following function:
<pre>
SELECT \ref lda_train(
<em>'data_table'</em>,
<em>'model_table'</em>,
<em>'output_data_table'</em>,
<em>voc_size</em>,
<em>topic_num</em>,
<em>iter_num</em>,
<em>alpha</em>,
<em>beta</em>)
</pre>
This function stores the resulting model in <tt><em>model_table</em></tt>.
The table has only 1 row and is in the following form:
<pre>{TABLE} <em>model_table</em> (
<em>voc_size</em> INTEGER,
<em>topic_num</em> INTEGER,
<em>alpha</em> FLOAT,
<em>beta</em> FLOAT,
<em>model</em> BIGINT[])
</pre>
This function also stores the topic counts and the topic assignments in
each document in <tt><em>output_data_table</em></tt>. The table is in the
following form:
<pre>{TABLE} <em>output_data_table</em> (
<em>docid</em> INTEGER,
<em>wordcount</em> INTEGER,
<em>words</em> INTEGER[],
<em>counts</em> INTEGER[],
<em>topic_count</em> INTEGER[],
<em>topic_assignment</em> INTEGER[])
</pre>
- The prediction (i.e. labelling of test documents using a learned LDA model)
can be done with the following function:
<pre>
SELECT \ref lda_predict(
<em>'data_table'</em>,
<em>'model_table'</em>,
<em>'output_table'</em>);
</pre>
This function stores the prediction results in
<em>output_table</em>. Each row in the table stores the topic
distribution and the topic assignments for a document in the dataset.
The table is in the following form:
<pre>{TABLE} <em>output_table</em> (
<em>docid</em> INTEGER,
<em>wordcount</em> INTEGER,
<em>words</em> INTEGER[],
<em>counts</em> INTEGER[],
<em>topic_count</em> INTEGER[],
<em>topic_assignment</em> INTEGER[])
</pre>
- This module also provides a function for computing the perplexity:
<pre>
SELECT \ref lda_get_perplexity(
<em>'model_table'</em>,
<em>'output_data_table'</em>);
</pre>
@implementation
The input format requires the user to tokenize each document into an array of
words. Tokenizing and filtering documents is out of scope for this module.
Internally, the input data is validated and then converted to the following
format for efficiency:
<pre>{TABLE} <em>__internal_data_table__</em> (
<em>docid</em> INTEGER,
<em>wordcount</em> INTEGER,
<em>words</em> INTEGER[],
<em>counts</em> INTEGER[])
</pre>
where \c docid is the document ID, \c wordcount is the number of words in the
document, \c words is the list of unique words in the document, and \c counts
is a list of the number of occurrences of each unique word in the document.
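For example (with made-up values), the input rows <tt>(docid=0, wordid=3,
count=2)</tt> and <tt>(docid=0, wordid=7, count=1)</tt> would be folded into a
single row for document 0:
<pre class="example">
 docid | wordcount | words | counts
-------+-----------+-------+--------
     0 |         3 | {3,7} | {2,1}
</pre>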
@anchor train
@par Training Function
The LDA training function has the following syntax.
<pre class="syntax">
lda_train( data_table,
model_table,
output_data_table,
voc_size,
topic_num,
iter_num,
alpha,
beta
)
</pre>
\b Arguments
<dl class="arglist">
<dt>data_table</dt>
<dd>TEXT. The name of the table storing the training dataset. Each row is
in the form <tt>&lt;docid, wordid, count&gt;</tt> where \c docid, \c wordid, and \c count
are non-negative integers.
The \c docid column refers to the document ID, the \c wordid column is the
word ID (the index of a word in the vocabulary), and \c count is the
number of occurrences of the word in the document. </dd>
<dt>model_table</dt>
<dd>TEXT. The name of the table storing the learned models. This table has one row and the following columns.
<table class="output">
<tr>
<th>voc_size</th>
<td>INTEGER. Size of the vocabulary. Note that the \c wordid values should be continuous integers ranging from 0 to \c voc_size &minus; \c 1. A data validation routine is called to validate the dataset.</td>
</tr>
<tr>
<th>topic_num</th>
<td>INTEGER. Number of topics.</td>
</tr>
<tr>
<th>alpha</th>
<td>DOUBLE PRECISION. Dirichlet parameter for the per-doc topic multinomial (e.g. 50/topic_num).</td>
</tr>
<tr>
<th>beta</th>
<td>DOUBLE PRECISION. Dirichlet parameter for the per-topic word multinomial (e.g. 0.01).</td>
</tr>
<tr>
<th>model</th>
<td>BIGINT[]. The learned model, encoding the per-word topic counts and the corpus-level topic counts.</td>
</tr>
</table>
</dd>
<dt>output_data_table</dt>
<dd>TEXT. The name of the table to store the output data. It has the following columns:
<table class="output">
<tr>
<th>docid</th>
<td>INTEGER.</td>
</tr>
<tr>
<th>wordcount</th>
<td>INTEGER.</td>
</tr>
<tr>
<th>words</th>
<td>INTEGER[].</td>
</tr>
<tr>
<th>counts</th>
<td>INTEGER[].</td>
</tr>
<tr>
<th>topic_count</th>
<td>INTEGER[].</td>
</tr>
<tr>
<th>topic_assignment</th>
<td>INTEGER[].</td>
</tr>
</table>
</dd>
<dt>voc_size</dt>
<dd>INTEGER. Size of the vocabulary. The \c wordid values in the data table should be continuous integers ranging from 0 to \c voc_size &minus; \c 1.</dd>
<dt>topic_num</dt>
<dd>INTEGER. Number of topics (e.g. 100).</dd>
<dt>iter_num</dt>
<dd>INTEGER. Number of iterations (e.g. 60).</dd>
<dt>alpha</dt>
<dd>DOUBLE PRECISION. Dirichlet parameter for the per-doc topic multinomial (e.g. 50/topic_num).</dd>
<dt>beta</dt>
<dd>DOUBLE PRECISION. Dirichlet parameter for the per-topic word multinomial (e.g. 0.01).</dd>
</dl>
@anchor predict
@par Prediction Function
Prediction&mdash;labelling test documents using a learned LDA model&mdash;is accomplished with the following function:
<pre class="syntax">
lda_predict( data_table,
model_table,
output_table
);
</pre>
This function stores the prediction results in
<tt><em>output_table</em></tt>. Each row in the table stores the topic
distribution and the topic assignments for a document in the dataset. The
table has the following columns:
<table class="output">
<tr>
<th>docid</th>
<td>INTEGER. Document ID.</td>
</tr>
<tr>
<th>wordcount</th>
<td>INTEGER. Number of words in the document.</td>
</tr>
<tr>
<th>words</th>
<td>INTEGER[]. List of word IDs in this document.</td>
</tr>
<tr>
<th>counts</th>
<td>INTEGER[]. List of word counts in this document.</td>
</tr>
<tr>
<th>topic_count</th>
<td>INTEGER[]. List of topic counts in this document; the array has length topic_num.</td>
</tr>
<tr>
<th>topic_assignment</th>
<td>INTEGER[]. Topic index for each word in this document; the array has length wordcount.</td>
</tr>
</table>
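An overloaded version of <tt>lda_predict()</tt> accepts an additional
<em>iter_num</em> argument, the number of Gibbs sampling iterations used for
prediction (the default is 20):
<pre class="syntax">
lda_predict( data_table,
             model_table,
             output_table,
             iter_num
           );
</pre>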
@anchor perplexity
@par Perplexity Function
This module provides a function for computing the perplexity of a dataset
given a learned model.
<pre class="syntax">
lda_get_perplexity( model_table,
                    output_data_table
                  );
</pre>
Here <em>model_table</em> is the model table generated by the training
function, and <em>output_data_table</em> is the output table generated by the
prediction function. The function returns the perplexity as a single DOUBLE
PRECISION value; lower values indicate a better fit to the data.
@anchor examples
@examp
-# Prepare a training dataset for LDA. The examples below use small strings extracted from various Wikipedia documents.
<pre class="example">
CREATE TABLE documents(docid INT4, contents TEXT);
INSERT INTO documents VALUES
(0, 'Statistical topic models are a class of Bayesian latent variable models, originally developed for analyzing the semantic content of large document corpora.'),
(1, 'By the late 1960s, the balance between pitching and hitting had swung in favor of the pitchers. In 1968 Carl Yastrzemski won the American League batting title with an average of just .301, the lowest in history.'),
(2, 'Machine learning is closely related to and often overlaps with computational statistics; a discipline that also specializes in prediction-making. It has strong ties to mathematical optimization, which deliver methods, theory and application domains to the field.'),
(3, 'California''s diverse geography ranges from the Sierra Nevada in the east to the Pacific Coast in the west, from the Redwood–Douglas fir forests of the northwest, to the Mojave Desert areas in the southeast. The center of the state is dominated by the Central Valley, a major agricultural area. ');
</pre>
-# Build a word count table by extracting the words and building a histogram for
each document using the \c term_frequency function (\ref grp_text_utilities).
Passing \c TRUE as the last argument replaces each word with a word ID and also
creates a vocabulary table named <em>my_training_vocabulary</em> that maps word
IDs to words.
<pre class="example">
-- Convert a string to a list of words
ALTER TABLE documents ADD COLUMN words TEXT[];
UPDATE documents SET words = regexp_split_to_array(lower(contents), E'[\\\\s+\\\\.\\\\,]');
\nbsp
-- Create the term frequency table
DROP TABLE IF EXISTS my_training;
SELECT madlib.term_frequency('documents', 'docid', 'words', 'my_training', TRUE);
SELECT * FROM my_training order by docid limit 20;
</pre>
<pre class="result">
docid | wordid | count
-------+--------+-------
0 | 57 | 1
0 | 86 | 1
0 | 4 | 1
0 | 55 | 1
0 | 69 | 2
0 | 81 | 1
0 | 30 | 1
0 | 33 | 1
0 | 36 | 1
0 | 43 | 1
0 | 25 | 1
0 | 65 | 2
0 | 72 | 1
0 | 9 | 1
0 | 0 | 2
0 | 29 | 1
0 | 18 | 1
0 | 12 | 1
0 | 96 | 1
0 | 91 | 1
(20 rows)
</pre>
<pre class="example">
SELECT * FROM my_training_vocabulary order by wordid limit 20;
</pre>
<pre class="result">
wordid | word
--------+--------------
0 |
1 | 1960s
2 | 1968
3 | 301
4 | a
5 | agricultural
6 | also
7 | american
8 | an
9 | analyzing
10 | and
11 | application
12 | are
13 | area
14 | areas
15 | average
16 | balance
17 | batting
18 | bayesian
19 | between
(20 rows)
</pre>
-# Create an LDA model using the \c lda_train() function.
<pre class="example">
SELECT madlib.lda_train( 'my_training',  -- data_table
                         'my_model',     -- model_table
                         'my_outdata',   -- output_data_table
                         104,            -- voc_size
                         5,              -- topic_num
                         10,             -- iter_num
                         5,              -- alpha
                         0.01            -- beta
                       );
</pre>
After a successful run of the lda_train() function, two tables are generated:
one stores the learned model and the other stores the output data.
-# To get the detailed information about the learned model, run these commands:
<pre class="example">
-- The topic description by top-k words
SELECT madlib.lda_get_topic_desc( 'my_model',
'my_training_vocabulary',
'my_topic_desc',
15);
select * from my_topic_desc order by topicid, prob DESC;
</pre>
<pre class="result">
topicid | wordid | prob | word
---------+--------+--------------------+-------------------
1 | 69 | 0.181900726392252 | of
1 | 52 | 0.0608353510895884 | is
1 | 65 | 0.0608353510895884 | models
1 | 30 | 0.0305690072639225 | corpora
1 | 1 | 0.0305690072639225 | 1960s
1 | 57 | 0.0305690072639225 | latent
1 | 35 | 0.0305690072639225 | diverse
1 | 81 | 0.0305690072639225 | semantic
1 | 19 | 0.0305690072639225 | between
1 | 75 | 0.0305690072639225 | pitchers
1 | 43 | 0.0305690072639225 | for
1 | 6 | 0.0305690072639225 | also
1 | 40 | 0.0305690072639225 | favor
1 | 47 | 0.0305690072639225 | had
1 | 28 | 0.0305690072639225 | computational
....
</pre>
\nbsp
<pre class="example">
-- The per-word topic counts (sorted by topic id)
SELECT madlib.lda_get_word_topic_count( 'my_model',
'my_word_topic_count');
</pre>
\nbsp
<pre class="result">
wordid | topic_count
--------+--------------
0 | {0,17,0,0,0}
1 | {1,0,0,0,0}
2 | {0,0,0,0,1}
3 | {0,0,0,0,1}
4 | {0,0,0,0,3}
5 | {0,1,0,0,0}
6 | {1,0,0,0,0}
7 | {1,0,0,0,0}
8 | {0,0,0,1,0}
9 | {1,0,0,0,0}
10 | {0,0,0,0,3}
11 | {0,0,1,0,0}
....
</pre>
-# To get the topic counts and the topic assignments for each document, run the following commands:
<pre class="example">
-- The per-document topic assignments and counts:
SELECT docid, topic_assignment, topic_count FROM my_outdata;
</pre>
<pre class="result">
docid | topic_assignment | topic_count
-------+-----------------------------------------------------------------------------------------------------------------+----------------
1 | {1,1,1,1,1,1,2,4,1,4,4,4,1,0,2,1,0,2,2,3,4,2,1,1,4,2,4,3,0,0,2,4,4,3,3,3,3,3,0,1,0,4} | {6,12,7,7,10}
3 | {1,1,1,1,1,1,4,0,2,3,1,2,0,0,0,1,2,2,1,3,3,2,2,1,2,2,2,0,3,0,4,1,0,0,1,4,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,3} | {8,12,10,21,4}
0 | {1,1,4,2,1,4,4,4,1,3,1,0,0,0,0,0,0,0,0,1,1,3,0,1} | {9,8,1,2,4}
2 | {1,1,1,1,4,1,4,4,2,0,2,4,1,1,4,1,2,0,1,3,1,2,4,3,2,4,4,3,1,2,0,3,3,1,4,3,3,3,2,1} | {3,13,7,8,9}
(4 rows)
</pre>
-# To use a learned LDA model for prediction (that is, to label new documents), use the following command:
<pre class="example">
SELECT madlib.lda_predict( 'my_testing',
'my_model',
'my_pred'
);
</pre>
The test table (<em>my_testing</em>) is expected to be in the same form as the
training table (<em>my_training</em>) and can be created with the same process.
After a successful run of the lda_predict() function, the prediction results
are generated and stored in <em>my_pred</em>. This table has the same
schema as the <em>my_outdata</em> table generated by the lda_train() function.
-# Use the following command to compute the perplexity of the result.
<pre class="example">
SELECT madlib.lda_get_perplexity( 'my_model',
'my_pred'
);
</pre>
@anchor literature
@literature
[1] D.M. Blei, A.Y. Ng, M.I. Jordan, <em>Latent Dirichlet Allocation</em>,
Journal of Machine Learning Research, vol. 3, pp. 993-1022, 2003.
[2] T. Griffiths and M. Steyvers, <em>Finding scientific topics</em>, PNAS,
vol. 101, pp. 5228-5235, 2004.
[3] Y. Wang, H. Bai, M. Stanton, W-Y. Chen, and E.Y. Chang, <em>PLDA: Parallel
Latent Dirichlet Allocation for Large-scale Applications</em>, AAIM, 2009.
[4] http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation
[5] J. Chang, <em>Collapsed Gibbs sampling methods for topic models</em>, R manual,
2010.
@anchor related
@par Related Topics
File lda.sql_in documenting the SQL functions.
*/
-- UDT for summarizing a UDF call
DROP TYPE IF EXISTS MADLIB_SCHEMA.lda_result CASCADE;
CREATE TYPE MADLIB_SCHEMA.lda_result AS
(
output_table TEXT,
description TEXT
);
/**
 * @brief This UDF provides an entry point for the LDA training process.
* @param data_table Table storing the training dataset, each row is in
* the form of <docid, wordid, count> where docid,
* wordid, and count are all non-negative integers.
* @param model_table Table storing the learned models (voc_size, topic_num,
* alpha, beta, per-word topic counts, and
* corpus-level topic counts)
* @param output_data_table Table storing the output data table in the form of
* <docid, wordcount, words, counts, topic_count,
* topic_assignment>
 * @param voc_size Size of the vocabulary (Note that the wordid should
 * be continuous integers starting from 0 to voc_size -
 * 1. A data validation routine will be called to
 * validate the dataset.)
* @param topic_num Number of topics (e.g. 100)
* @param iter_num Number of iterations (e.g. 60)
* @param alpha Dirichlet parameter for the per-doc topic multinomial
* (e.g. 50/topic_num)
* @param beta Dirichlet parameter for the per-topic word multinomial
* (e.g. 0.01)
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.lda_train
(
data_table TEXT,
model_table TEXT,
output_data_table TEXT,
voc_size INT4,
topic_num INT4,
iter_num INT4,
alpha FLOAT8,
beta FLOAT8
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.lda_train(
schema_madlib, data_table, model_table, output_data_table, voc_size,
topic_num, iter_num, alpha, beta
)
return [[model_table, 'model table'],
[output_data_table, 'output data table']]
$$ LANGUAGE plpythonu
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
/**
 * @brief This UDF provides an entry point for the LDA prediction process.
* @param data_table Table storing the testing dataset, each row is in the
* form of <docid, wordid, count>
* where docid, wordid, and count are all non-negative
* integers.
* @param model_table Table storing the learned models
* @param output_table Table storing per-document topic counts and topic
* assignments
* @note default iter_num = 20
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.lda_predict
(
data_table TEXT,
model_table TEXT,
output_table TEXT
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.lda_predict(schema_madlib, data_table, model_table, output_table)
return [[
output_table,
'per-doc topic distribution and per-word topic assignments']]
$$ LANGUAGE plpythonu
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
/**
 * @brief An overloaded version that allows users to specify iter_num.
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.lda_predict
(
data_table TEXT,
model_table TEXT,
output_table TEXT,
iter_num INT4
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.lda_predict(
schema_madlib, data_table, model_table, output_table, iter_num)
return [[
output_table,
'per-doc topic distribution and per-word topic assignments']]
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
/**
* @brief This UDF computes the per-topic word counts.
* @param model_table The model table generated by the training process
* @param output_table The output table storing the per-topic word counts
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.lda_get_topic_word_count
(
model_table TEXT,
output_table TEXT
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.get_topic_word_count(schema_madlib, model_table, output_table)
return [[output_table, 'per-topic word counts']]
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
/**
* @brief This UDF computes the per-word topic counts.
* @param model_table The model table generated by the training process
 * @param output_table The output table storing the per-word topic counts
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.lda_get_word_topic_count
(
model_table TEXT,
output_table TEXT
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.get_word_topic_count(schema_madlib, model_table, output_table)
return [[output_table, 'per-word topic counts']]
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
/**
 * @brief This UDF gets the description for each topic (top-k words)
 * @param model_table The model table generated by the training process
 * @param vocab_table The vocabulary table (<wordid, word>)
 * @param desc_table The output table for storing the per-topic descriptions
 * @param top_k The number of top words for each topic description
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.lda_get_topic_desc
(
model_table TEXT,
vocab_table TEXT,
desc_table TEXT,
top_k INT4
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.get_topic_desc(
schema_madlib, model_table, vocab_table, desc_table, top_k)
return [[
desc_table,
"""topic description, use "ORDER BY topicid, prob DESC" to check the
results"""]]
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
/**
* @brief This UDF assigns topics to words in a document randomly.
* @param word_count The number of words in the document
* @param topic_num The number of topics (specified by the user)
* @return The topic counts and topic assignments
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_random_assign
(
word_count INT4,
topic_num INT4
)
RETURNS INT4[]
AS 'MODULE_PATHNAME', 'lda_random_assign'
LANGUAGE C STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
 * @brief This UDF learns the topics of words in a document and is the main
 * step of a Gibbs sampling iteration. The model parameter (including the
 * per-word topic counts and corpus-level topic counts) is passed to this
 * function in the first call and then transferred to subsequent calls through
 * fcinfo->flinfo->fn_extra to allow in-place updates.
 * @param words The set of unique words in the document
 * @param counts The count of each unique word in the document
* (sum(counts) = word_count)
* @param doc_topic The current per-doc topic counts and topic
* assignments
* @param model The current model (including the per-word topic counts
* and the corpus-level topic counts)
* @param alpha The Dirichlet parameter for per-document topic multinomial
* @param beta The Dirichlet parameter for per-topic word multinomial
* @param voc_size The size of vocabulary
* @param topic_num The number of topics
* @param iter_num The number of iterations
* @return The learned topic counts and topic assignments
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_gibbs_sample
(
words INT4[],
counts INT4[],
doc_topic INT4[],
model INT8[],
alpha FLOAT8,
beta FLOAT8,
voc_size INT4,
topic_num INT4,
iter_num INT4
)
RETURNS INT4[]
AS 'MODULE_PATHNAME', 'lda_gibbs_sample'
LANGUAGE C
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
* @brief This UDF is the sfunc for the aggregator computing the topic counts
* for each word and the topic count in the whole corpus. It scans the topic
* assignments in a document and updates the topic counts.
* @param state The topic counts
* @param words The unique words in the document
 * @param counts The count of each unique word in the document
* (sum(counts) = word_count)
* @param topic_assignment The topic assignments in the document
* @param voc_size The size of vocabulary
* @param topic_num The number of topics
* @return The updated state
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_count_topic_sfunc
(
state INT8[],
words INT4[],
counts INT4[],
topic_assignment INT4[],
voc_size INT4,
topic_num INT4
)
RETURNS INT8[]
AS 'MODULE_PATHNAME', 'lda_count_topic_sfunc'
LANGUAGE C
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
* @brief This UDF is the prefunc for the aggregator computing the per-word
* topic counts.
* @param state1 The local word topic counts
* @param state2 The local word topic counts
* @return The element-wise sum of two local states
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_count_topic_prefunc
(
state1 INT8[],
state2 INT8[]
)
RETURNS INT8[]
AS 'MODULE_PATHNAME', 'lda_count_topic_prefunc'
LANGUAGE C IMMUTABLE STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
 * @brief This UDA computes the word topic counts by scanning and summing
 * up topic assignments in each document.
 * @param words The unique words in the document
 * @param counts The count of each unique word in the document
 * @param topic_assignment The topic assignments in the document
 * @param voc_size The size of vocabulary
 * @param topic_num The number of topics
 * @return The word topic counts (a 1-D array embedding a 2-D array)
*/
DROP AGGREGATE IF EXISTS
MADLIB_SCHEMA.__lda_count_topic_agg
(
INT4[],
INT4[],
INT4[],
INT4,
INT4
);
CREATE AGGREGATE
MADLIB_SCHEMA.__lda_count_topic_agg
(
INT4[],
INT4[],
INT4[],
INT4,
INT4
)
(
stype = INT8[],
sfunc = MADLIB_SCHEMA.__lda_count_topic_sfunc
m4_ifdef(
`__POSTGRESQL__', `',
`, prefunc = MADLIB_SCHEMA.__lda_count_topic_prefunc'
)
);
/**
* @brief This UDF computes the perplexity given the output data table and the
* model table.
* @param model_table The model table generated by lda_train
* @param output_data_table The output data table generated by lda_predict
* @return The perplexity
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.lda_get_perplexity
(
model_table TEXT,
output_data_table TEXT
)
RETURNS FLOAT8 AS $$
PythonFunctionBodyOnly(`lda', `lda')
return lda.get_perplexity(
schema_madlib, model_table, output_data_table)
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `READS SQL DATA', `');
/**
 * @brief This UDF is the sfunc for the aggregator computing the perplexity.
 * @param state The cached model plus perplexity
 * @param words The unique words in the document
 * @param counts The count of each unique word in the document
* @param doc_topic The topic counts in the document
* @param model The learned model
* @param alpha The Dirichlet parameter for per-document topic multinomial
* @param beta The Dirichlet parameter for per-topic word multinomial
* @param voc_size The size of vocabulary
* @param topic_num The number of topics
* @return The updated state
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_perplexity_sfunc
(
state INT8[],
words INT4[],
counts INT4[],
doc_topic INT4[],
model INT8[],
alpha FLOAT8,
beta FLOAT8,
voc_size INT4,
topic_num INT4
)
RETURNS INT8[]
AS 'MODULE_PATHNAME', 'lda_perplexity_sfunc'
LANGUAGE C IMMUTABLE
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
* @brief This UDF is the prefunc for the aggregator computing the perplexity.
* @param state1 The local state
* @param state2 The local state
* @return The merged state
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_perplexity_prefunc
(
state1 INT8[],
state2 INT8[]
)
RETURNS INT8[]
AS 'MODULE_PATHNAME', 'lda_perplexity_prefunc'
LANGUAGE C IMMUTABLE STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
* @brief This UDF is the finalfunc for the aggregator computing the perplexity.
* @param state The merged state
 * @return The perplexity
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_perplexity_ffunc
(
state INT8[]
)
RETURNS FLOAT8
AS 'MODULE_PATHNAME', 'lda_perplexity_ffunc'
LANGUAGE C IMMUTABLE STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/*
 * @brief This aggregator computes the perplexity.
 * @param words The unique words in the document
 * @param counts The count of each unique word in the document
* @param doc_topic The topic counts in the document
* @param model The learned model
* @param alpha The Dirichlet parameter for per-document topic multinomial
* @param beta The Dirichlet parameter for per-topic word multinomial
* @param voc_size The size of vocabulary
* @param topic_num The number of topics
* @return The updated perplexity
*/
DROP AGGREGATE IF EXISTS
MADLIB_SCHEMA.__lda_perplexity_agg
(
INT4[],
INT4[],
INT4[],
INT8[],
FLOAT8,
FLOAT8,
INT4,
INT4
);
CREATE AGGREGATE
MADLIB_SCHEMA.__lda_perplexity_agg
(
INT4[],
INT4[],
INT4[],
INT8[],
FLOAT8,
FLOAT8,
INT4,
INT4
)
(
stype = INT8[],
sfunc = MADLIB_SCHEMA.__lda_perplexity_sfunc,
finalfunc = MADLIB_SCHEMA.__lda_perplexity_ffunc
m4_ifdef(
`__POSTGRESQL__', `',
`, prefunc = MADLIB_SCHEMA.__lda_perplexity_prefunc'
)
);
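/*
 * @brief Check that the values in a 2-D count array (r rows by c columns) do
 *        not exceed the INT4 ceiling, and cast it to INT4[]. (Internal
 *        helper; this description is inferred from the function name and
 *        signature.)
 * @param arr The INT8 count array to be checked
 * @param r The number of rows
 * @param c The number of columns
 */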
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_check_count_ceiling
(
arr INT8[],
r INT4,
c INT4
)
RETURNS INT4[]
AS 'MODULE_PATHNAME', 'lda_check_count_ceiling'
LANGUAGE C IMMUTABLE STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
* @brief Unnest a 2-D array into a set of 1-D arrays
 * @param arr The 2-D array to be unnested
 * @param r The number of rows
 * @param c The number of columns
 * @return The unnested 1-D arrays
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_util_unnest
(
arr INT8[],
r INT4,
c INT4
)
RETURNS SETOF INT4[]
AS 'MODULE_PATHNAME', 'lda_unnest'
LANGUAGE C IMMUTABLE STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
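/**
 * @brief Unnest the transpose of a 2-D array into a set of 1-D arrays
 *        (companion to __lda_util_unnest; description inferred from the
 *        function name and signature)
 * @param arr The 2-D array to be unnested
 * @param r The number of rows
 * @param c The number of columns
 * @return The unnested 1-D arrays of the transposed array
 */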
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_util_unnest_transpose
(
arr INT8[],
r INT4,
c INT4
)
RETURNS SETOF INT4[]
AS 'MODULE_PATHNAME', 'lda_unnest_transpose'
LANGUAGE C IMMUTABLE STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
* @brief Transpose a 2-D array
* @param matrix The input 2-D array
 * @return The transposed array
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_util_transpose
(
matrix INT8[][]
)
RETURNS INT8[][]
AS 'MODULE_PATHNAME', 'lda_transpose'
LANGUAGE C IMMUTABLE STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
* @brief L1 normalization with smoothing
* @param arr The array to be normalized
* @param smooth The smoothing parameter
* @return The normalized vector
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_util_norm_with_smoothing
(
arr FLOAT8[],
smooth FLOAT8
)
RETURNS FLOAT8[]
AS 'MODULE_PATHNAME', 'l1_norm_with_smoothing'
LANGUAGE C IMMUTABLE STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
 * @brief This UDF returns the indexes of the array elements in sorted order
 * @param arr The array to be sorted
 * @return The indexes of the elements
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_util_index_sort
(
arr FLOAT8[]
)
RETURNS INT4[] AS $$
PythonFunctionBodyOnly(`lda', `lda')
return lda.index_sort(arr)
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
/**
 * @brief This UDF checks the vocabulary and converts non-continuous wordids
 * into continuous integers ranging from 0 to voc_size - 1.
* @param vocab_table The vocabulary table in the form of
<wordid::int4, word::text>
* @param output_vocab_table The regularized vocabulary table
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_util_norm_vocab
(
vocab_table TEXT,
output_vocab_table TEXT
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.norm_vocab(vocab_table, output_vocab_table)
return [[output_vocab_table,'normalized vocabulary table']]
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
/**
 * @brief This UDF converts the data table according to the normalized
 * vocabulary; all rows with non-positive count values are removed
 * @param data_table The data table to be normalized
 * @param norm_vocab_table The normalized vocabulary table
 * @param output_data_table The normalized data table
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_util_norm_dataset
(
data_table TEXT,
norm_vocab_table TEXT,
output_data_table TEXT
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.norm_dataset(data_table, norm_vocab_table, output_data_table)
return [[output_data_table,'normalized data table']]
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
/**
 * @brief This UDF extracts the list of wordids from the data table and joins
 * it with the vocabulary table to get the list of common wordids. It then
 * normalizes the vocabulary based on the common wordids, and normalizes the
 * data table based on the normalized vocabulary.
* @param data_table The data table to be normalized
* @param vocab_table The vocabulary table to be normalized
* @param output_data_table The normalized data table
* @param output_vocab_table The normalized vocabulary table
*/
CREATE OR REPLACE FUNCTION
MADLIB_SCHEMA.__lda_util_conorm_data
(
data_table TEXT,
vocab_table TEXT,
output_data_table TEXT,
output_vocab_table TEXT
)
RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
PythonFunctionBodyOnly(`lda', `lda')
lda.conorm_data(
data_table, vocab_table, output_data_table, output_vocab_table)
return [[output_data_table,'normalized data table'],
[output_vocab_table,'normalized vocab table']]
$$ LANGUAGE plpythonu STRICT
m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');