| #!/usr/bin/env python |
| # Copyright (c) 2012 Trent Mick. |
| # Copyright (c) 2007-2008 ActiveState Corp. |
| # License: MIT (http://www.opensource.org/licenses/mit-license.php) |
| |
| from __future__ import generators |
| |
| r"""A fast and complete Python implementation of Markdown. |
| |
| [from http://daringfireball.net/projects/markdown/] |
| > Markdown is a text-to-HTML filter; it translates an easy-to-read / |
| > easy-to-write structured text format into HTML. Markdown's text |
| > format is most similar to that of plain text email, and supports |
| > features such as headers, *emphasis*, code blocks, blockquotes, and |
| > links. |
| > |
| > Markdown's syntax is designed not as a generic markup language, but |
| > specifically to serve as a front-end to (X)HTML. You can use span-level |
| > HTML tags anywhere in a Markdown document, and you can use block level |
> HTML tags (like <div> and <table>) as well.
| |
| Module usage: |
| |
| >>> import markdown2 |
| >>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)` |
| u'<p><em>boo!</em></p>\n' |
| |
| >>> markdowner = Markdown() |
| >>> markdowner.convert("*boo!*") |
| u'<p><em>boo!</em></p>\n' |
| >>> markdowner.convert("**boom!**") |
| u'<p><strong>boom!</strong></p>\n' |
| |
| This implementation of Markdown implements the full "core" syntax plus a |
| number of extras (e.g., code syntax coloring, footnotes) as described on |
| <https://github.com/trentm/python-markdown2/wiki/Extras>. |
| """ |
| |
| cmdln_desc = """A fast and complete Python implementation of Markdown, a |
| text-to-HTML conversion tool for web writers. |
| |
| Supported extra syntax options (see -x|--extras option below and |
| see <https://github.com/trentm/python-markdown2/wiki/Extras> for details): |
| |
| * code-friendly: Disable _ and __ for em and strong. |
| * cuddled-lists: Allow lists to be cuddled to the preceding paragraph. |
* fenced-code-blocks: Allows a code block to be fenced with '```' on a
  line before and after, instead of requiring indentation. Based on
| <http://github.github.com/github-flavored-markdown/> with support for |
| syntax highlighting. |
| * footnotes: Support footnotes as in use on daringfireball.net and |
  implemented in other Markdown processors (though not in Markdown.pl v1.0.1).
| * header-ids: Adds "id" attributes to headers. The id value is a slug of |
| the header text. |
| * html-classes: Takes a dict mapping html tag names (lowercase) to a |
| string to use for a "class" tag attribute. Currently only supports |
| "pre" and "code" tags. Add an issue if you require this for other tags. |
| * markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to |
| have markdown processing be done on its contents. Similar to |
| <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with |
| some limitations. |
| * metadata: Extract metadata from a leading '---'-fenced block. |
| See <https://github.com/trentm/python-markdown2/issues/77> for details. |
* nofollow: Add `rel="nofollow"` to all `<a>` tags with an href. See
| <http://en.wikipedia.org/wiki/Nofollow>. |
| * pyshell: Treats unindented Python interactive shell sessions as <code> |
| blocks. |
| * link-patterns: Auto-link given regex patterns in text (e.g. bug number |
| references, revision number references). |
| * smarty-pants: Replaces ' and " with curly quotation marks or curly |
| apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes, |
| and ellipses. |
| * toc: The returned HTML string gets a new "toc_html" attribute which is |
| a Table of Contents for the document. (experimental) |
| * xml: Passes one-liner processing instructions and namespaced XML tags. |
| * tables: Tables using the same format as GFM |
| <https://help.github.com/articles/github-flavored-markdown#tables> and |
| PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>. |
| * wiki-tables: Google Code Wiki-style tables. See |
| <http://code.google.com/p/support/wiki/WikiSyntax#Tables>. |
| """ |
| |
| # Dev Notes: |
| # - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm |
#   not yet sure if there are implications with this. Compare 'pydoc sre'
| # and 'perldoc perlre'. |
| |
| __version_info__ = (2, 3, 0) |
| __version__ = '.'.join(map(str, __version_info__)) |
| __author__ = "Trent Mick" |
| |
| import os |
| import sys |
| from pprint import pprint, pformat |
| import re |
| import logging |
| try: |
| from hashlib import md5 |
| except ImportError: |
| from md5 import md5 |
| import optparse |
| from random import random, randint |
| import codecs |
| |
| |
| #---- Python version compat |
| |
| try: |
| from urllib.parse import quote # python3 |
| except ImportError: |
| from urllib import quote # python2 |
| |
| if sys.version_info[:2] < (2,4): |
| from sets import Set as set |
| def reversed(sequence): |
| for i in sequence[::-1]: |
| yield i |
| |
| # Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3). |
| if sys.version_info[0] <= 2: |
| py3 = False |
| try: |
| bytes |
| except NameError: |
| bytes = str |
| base_string_type = basestring |
| elif sys.version_info[0] >= 3: |
| py3 = True |
| unicode = str |
| base_string_type = str |
| |
| |
| |
| #---- globals |
| |
| DEBUG = False |
| log = logging.getLogger("markdown") |
| |
| DEFAULT_TAB_WIDTH = 4 |
| |
| |
# Note: `bytes(int)` on Python 3 would create that many NUL bytes rather than
# the number's digits, so build the salt from the digit string explicitly.
SECRET_SALT = str(randint(0, 1000000)).encode("utf-8")
| def _hash_text(s): |
| return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest() |
| |
| # Table of hash values for escaped characters: |
| g_escape_table = dict([(ch, _hash_text(ch)) |
| for ch in '\\`*_{}[]()>#+-.!']) |
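
# For example, _hash_text("*") returns 'md5-' followed by a 32-character hex
# digest. The digest differs between runs because of SECRET_SALT; the
# placeholders are swapped back for the literal characters in
# _unescape_special_chars().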
| |
| |
| |
| #---- exceptions |
| |
| class MarkdownError(Exception): |
| pass |
| |
| |
| |
| #---- public api |
| |
| def markdown_path(path, encoding="utf-8", |
| html4tags=False, tab_width=DEFAULT_TAB_WIDTH, |
| safe_mode=None, extras=None, link_patterns=None, |
| use_file_vars=False): |
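    """Convert the Markdown file at `path` (read with the given `encoding`)
    to HTML and return the result. The remaining keyword arguments are
    passed through to the `Markdown` class constructor below.
    """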
| fp = codecs.open(path, 'r', encoding) |
| text = fp.read() |
| fp.close() |
| return Markdown(html4tags=html4tags, tab_width=tab_width, |
| safe_mode=safe_mode, extras=extras, |
| link_patterns=link_patterns, |
| use_file_vars=use_file_vars).convert(text) |
| |
| def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH, |
| safe_mode=None, extras=None, link_patterns=None, |
| use_file_vars=False): |
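    """Convert the given Markdown `text` to HTML and return the result.
    The keyword arguments are passed through to the `Markdown` class
    constructor below.
    """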
| return Markdown(html4tags=html4tags, tab_width=tab_width, |
| safe_mode=safe_mode, extras=extras, |
| link_patterns=link_patterns, |
| use_file_vars=use_file_vars).convert(text) |
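
# A usage sketch for the less obvious keyword arguments above. The pattern
# and URL are illustrative only; `link_patterns` is a list of (compiled
# regex, href template) pairs, as consumed by `_do_link_patterns` below:
#
#     import re, markdown2
#     html = markdown2.markdown(
#         "fixed in issue 42",
#         extras=["link-patterns"],
#         link_patterns=[
#             (re.compile(r"issue (\d+)", re.I), r"https://example.com/issues/\1"),
#         ],
#         safe_mode="escape",  # or "replace"; True is treated as "replace"
#     )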
| |
| class Markdown(object): |
| # The dict of "extras" to enable in processing -- a mapping of |
| # extra name to argument for the extra. Most extras do not have an |
| # argument, in which case the value is None. |
| # |
| # This can be set via (a) subclassing and (b) the constructor |
| # "extras" argument. |
| extras = None |
| |
| urls = None |
| titles = None |
| html_blocks = None |
| html_spans = None |
| html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py |
| |
| # Used to track when we're inside an ordered or unordered list |
| # (see _ProcessListItems() for details): |
| list_level = 0 |
| |
| _ws_only_line_re = re.compile(r"^[ \t]+$", re.M) |
| |
| def __init__(self, html4tags=False, tab_width=4, safe_mode=None, |
| extras=None, link_patterns=None, use_file_vars=False): |
| if html4tags: |
| self.empty_element_suffix = ">" |
| else: |
| self.empty_element_suffix = " />" |
| self.tab_width = tab_width |
| |
| # For compatibility with earlier markdown2.py and with |
| # markdown.py's safe_mode being a boolean, |
| # safe_mode == True -> "replace" |
| if safe_mode is True: |
| self.safe_mode = "replace" |
| else: |
| self.safe_mode = safe_mode |
| |
| # Massaging and building the "extras" info. |
| if self.extras is None: |
| self.extras = {} |
| elif not isinstance(self.extras, dict): |
| self.extras = dict([(e, None) for e in self.extras]) |
| if extras: |
| if not isinstance(extras, dict): |
| extras = dict([(e, None) for e in extras]) |
| self.extras.update(extras) |
| assert isinstance(self.extras, dict) |
| if "toc" in self.extras and not "header-ids" in self.extras: |
| self.extras["header-ids"] = None # "toc" implies "header-ids" |
| self._instance_extras = self.extras.copy() |
| |
| self.link_patterns = link_patterns |
| self.use_file_vars = use_file_vars |
| self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M) |
| |
| self._escape_table = g_escape_table.copy() |
| if "smarty-pants" in self.extras: |
| self._escape_table['"'] = _hash_text('"') |
| self._escape_table["'"] = _hash_text("'") |
| |
| def reset(self): |
| self.urls = {} |
| self.titles = {} |
| self.html_blocks = {} |
| self.html_spans = {} |
| self.list_level = 0 |
| self.extras = self._instance_extras.copy() |
| if "footnotes" in self.extras: |
| self.footnotes = {} |
| self.footnote_ids = [] |
| if "header-ids" in self.extras: |
| self._count_from_header_id = {} # no `defaultdict` in Python 2.4 |
| if "metadata" in self.extras: |
| self.metadata = {} |
| |
| # Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel" |
| # should only be used in <a> tags with an "href" attribute. |
| _a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE) |
| |
| def convert(self, text): |
| """Convert the given text.""" |
| # Main function. The order in which other subs are called here is |
| # essential. Link and image substitutions need to happen before |
| # _EscapeSpecialChars(), so that any *'s or _'s in the <a> |
| # and <img> tags get encoded. |
| |
| # Clear the global hashes. If we don't clear these, you get conflicts |
| # from other articles when generating a page which contains more than |
| # one article (e.g. an index page that shows the N most recent |
| # articles): |
| self.reset() |
| |
| if not isinstance(text, unicode): |
| #TODO: perhaps shouldn't presume UTF-8 for string input? |
| text = unicode(text, 'utf-8') |
| |
| if self.use_file_vars: |
| # Look for emacs-style file variable hints. |
| emacs_vars = self._get_emacs_vars(text) |
| if "markdown-extras" in emacs_vars: |
| splitter = re.compile("[ ,]+") |
| for e in splitter.split(emacs_vars["markdown-extras"]): |
| if '=' in e: |
| ename, earg = e.split('=', 1) |
| try: |
| earg = int(earg) |
| except ValueError: |
| pass |
| else: |
| ename, earg = e, None |
| self.extras[ename] = earg |
| |
| # Standardize line endings: |
| text = re.sub("\r\n|\r", "\n", text) |
| |
| # Make sure $text ends with a couple of newlines: |
| text += "\n\n" |
| |
| # Convert all tabs to spaces. |
| text = self._detab(text) |
| |
| # Strip any lines consisting only of spaces and tabs. |
| # This makes subsequent regexen easier to write, because we can |
| # match consecutive blank lines with /\n+/ instead of something |
| # contorted like /[ \t]*\n+/ . |
| text = self._ws_only_line_re.sub("", text) |
| |
| # strip metadata from head and extract |
| if "metadata" in self.extras: |
| text = self._extract_metadata(text) |
| |
| text = self.preprocess(text) |
| |
| if "fenced-code-blocks" in self.extras and not self.safe_mode: |
| text = self._do_fenced_code_blocks(text) |
| |
| if self.safe_mode: |
| text = self._hash_html_spans(text) |
| |
| # Turn block-level HTML blocks into hash entries |
| text = self._hash_html_blocks(text, raw=True) |
| |
| if "fenced-code-blocks" in self.extras and self.safe_mode: |
| text = self._do_fenced_code_blocks(text) |
| |
| # Strip link definitions, store in hashes. |
| if "footnotes" in self.extras: |
| # Must do footnotes first because an unlucky footnote defn |
| # looks like a link defn: |
| # [^4]: this "looks like a link defn" |
| text = self._strip_footnote_definitions(text) |
| text = self._strip_link_definitions(text) |
| |
| text = self._run_block_gamut(text) |
| |
| if "footnotes" in self.extras: |
| text = self._add_footnotes(text) |
| |
| text = self.postprocess(text) |
| |
| text = self._unescape_special_chars(text) |
| |
| if self.safe_mode: |
| text = self._unhash_html_spans(text) |
| |
| if "nofollow" in self.extras: |
| text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text) |
| |
| text += "\n" |
| |
| rv = UnicodeWithAttrs(text) |
| if "toc" in self.extras: |
| rv._toc = self._toc |
| if "metadata" in self.extras: |
| rv.metadata = self.metadata |
| return rv |
| |
| def postprocess(self, text): |
| """A hook for subclasses to do some postprocessing of the html, if |
| desired. This is called before unescaping of special chars and |
| unhashing of raw HTML spans. |
| """ |
| return text |
| |
| def preprocess(self, text): |
| """A hook for subclasses to do some preprocessing of the Markdown, if |
| desired. This is called after basic formatting of the text, but prior |
| to any extras, safe mode, etc. processing. |
| """ |
| return text |
| |
    # The content has a metadata block if it starts with '---'-fenced
    # `key: value` pairs. E.g. (indented for presentation):
| # --- |
| # foo: bar |
| # another-var: blah blah |
| # --- |
| _metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""") |
| |
| def _extract_metadata(self, text): |
| # fast test |
| if not text.startswith("---"): |
| return text |
| match = self._metadata_pat.match(text) |
| if not match: |
| return text |
| |
| tail = text[len(match.group(0)):] |
| metadata_str = match.group(1).strip() |
| for line in metadata_str.split('\n'): |
| key, value = line.split(':', 1) |
| self.metadata[key.strip()] = value.strip() |
| |
| return tail |
| |
| |
| _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE) |
| # This regular expression is intended to match blocks like this: |
| # PREFIX Local Variables: SUFFIX |
| # PREFIX mode: Tcl SUFFIX |
| # PREFIX End: SUFFIX |
| # Some notes: |
| # - "[ \t]" is used instead of "\s" to specifically exclude newlines |
| # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does |
| # not like anything other than Unix-style line terminators. |
| _emacs_local_vars_pat = re.compile(r"""^ |
| (?P<prefix>(?:[^\r\n|\n|\r])*?) |
| [\ \t]*Local\ Variables:[\ \t]* |
| (?P<suffix>.*?)(?:\r\n|\n|\r) |
| (?P<content>.*?\1End:) |
| """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE) |
| |
| def _get_emacs_vars(self, text): |
| """Return a dictionary of emacs-style local variables. |
| |
| Parsing is done loosely according to this spec (and according to |
| some in-practice deviations from this): |
| http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables |
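
        For example (an illustrative document, not taken from the spec), a
        Markdown file whose first line is

            <!-- -*- markdown-extras: footnotes, wiki-tables -*- -->

        yields {"markdown-extras": "footnotes, wiki-tables"}; convert() then
        splits that value into individual extras when `use_file_vars` is
        enabled.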
| """ |
| emacs_vars = {} |
| SIZE = pow(2, 13) # 8kB |
| |
| # Search near the start for a '-*-'-style one-liner of variables. |
| head = text[:SIZE] |
| if "-*-" in head: |
| match = self._emacs_oneliner_vars_pat.search(head) |
| if match: |
| emacs_vars_str = match.group(1) |
| assert '\n' not in emacs_vars_str |
| emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';') |
| if s.strip()] |
| if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]: |
| # While not in the spec, this form is allowed by emacs: |
| # -*- Tcl -*- |
| # where the implied "variable" is "mode". This form |
| # is only allowed if there are no other variables. |
| emacs_vars["mode"] = emacs_var_strs[0].strip() |
| else: |
| for emacs_var_str in emacs_var_strs: |
| try: |
| variable, value = emacs_var_str.strip().split(':', 1) |
| except ValueError: |
| log.debug("emacs variables error: malformed -*- " |
| "line: %r", emacs_var_str) |
| continue |
| # Lowercase the variable name because Emacs allows "Mode" |
| # or "mode" or "MoDe", etc. |
| emacs_vars[variable.lower()] = value.strip() |
| |
| tail = text[-SIZE:] |
| if "Local Variables" in tail: |
| match = self._emacs_local_vars_pat.search(tail) |
| if match: |
| prefix = match.group("prefix") |
| suffix = match.group("suffix") |
| lines = match.group("content").splitlines(0) |
| #print "prefix=%r, suffix=%r, content=%r, lines: %s"\ |
| # % (prefix, suffix, match.group("content"), lines) |
| |
| # Validate the Local Variables block: proper prefix and suffix |
| # usage. |
| for i, line in enumerate(lines): |
| if not line.startswith(prefix): |
| log.debug("emacs variables error: line '%s' " |
| "does not use proper prefix '%s'" |
| % (line, prefix)) |
| return {} |
| # Don't validate suffix on last line. Emacs doesn't care, |
| # neither should we. |
| if i != len(lines)-1 and not line.endswith(suffix): |
| log.debug("emacs variables error: line '%s' " |
| "does not use proper suffix '%s'" |
| % (line, suffix)) |
| return {} |
| |
| # Parse out one emacs var per line. |
| continued_for = None |
| for line in lines[:-1]: # no var on the last line ("PREFIX End:") |
| if prefix: line = line[len(prefix):] # strip prefix |
| if suffix: line = line[:-len(suffix)] # strip suffix |
| line = line.strip() |
| if continued_for: |
| variable = continued_for |
| if line.endswith('\\'): |
| line = line[:-1].rstrip() |
| else: |
| continued_for = None |
| emacs_vars[variable] += ' ' + line |
| else: |
| try: |
| variable, value = line.split(':', 1) |
| except ValueError: |
| log.debug("local variables error: missing colon " |
| "in local variables entry: '%s'" % line) |
| continue |
| # Do NOT lowercase the variable name, because Emacs only |
| # allows "mode" (and not "Mode", "MoDe", etc.) in this block. |
| value = value.strip() |
| if value.endswith('\\'): |
| value = value[:-1].rstrip() |
| continued_for = variable |
| else: |
| continued_for = None |
| emacs_vars[variable] = value |
| |
| # Unquote values. |
| for var, val in list(emacs_vars.items()): |
            if len(val) > 1 and val.startswith('"') and val.endswith('"'):
| emacs_vars[var] = val[1:-1] |
| |
| return emacs_vars |
| |
| # Cribbed from a post by Bart Lateur: |
| # <http://www.nntp.perl.org/group/perl.macperl.anyperl/154> |
| _detab_re = re.compile(r'(.*?)\t', re.M) |
| def _detab_sub(self, match): |
| g1 = match.group(1) |
| return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width)) |
| def _detab(self, text): |
| r"""Remove (leading?) tabs from a file. |
| |
| >>> m = Markdown() |
| >>> m._detab("\tfoo") |
| ' foo' |
| >>> m._detab(" \tfoo") |
| ' foo' |
| >>> m._detab("\t foo") |
| ' foo' |
| >>> m._detab(" foo") |
| ' foo' |
| >>> m._detab(" foo\n\tbar\tblam") |
| ' foo\n bar blam' |
| """ |
| if '\t' not in text: |
| return text |
| return self._detab_re.subn(self._detab_sub, text)[0] |
| |
    # I broke out the html5 tags here and added them to _block_tags_a and
| # _block_tags_b. This way html5 tags are easy to keep track of. |
| _html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption' |
| |
| _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del' |
| _block_tags_a += _html5tags |
| |
| _strict_tag_block_re = re.compile(r""" |
| ( # save in \1 |
| ^ # start of line (with re.M) |
| <(%s) # start tag = \2 |
| \b # word break |
| (.*\n)*? # any number of lines, minimally matching |
| </\2> # the matching end tag |
| [ \t]* # trailing spaces/tabs |
| (?=\n+|\Z) # followed by a newline or end of document |
| ) |
| """ % _block_tags_a, |
| re.X | re.M) |
| |
| _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math' |
| _block_tags_b += _html5tags |
| |
| _liberal_tag_block_re = re.compile(r""" |
| ( # save in \1 |
| ^ # start of line (with re.M) |
| <(%s) # start tag = \2 |
| \b # word break |
| (.*\n)*? # any number of lines, minimally matching |
| .*</\2> # the matching end tag |
| [ \t]* # trailing spaces/tabs |
| (?=\n+|\Z) # followed by a newline or end of document |
| ) |
| """ % _block_tags_b, |
| re.X | re.M) |
| |
| _html_markdown_attr_re = re.compile( |
| r'''\s+markdown=("1"|'1')''') |
| def _hash_html_block_sub(self, match, raw=False): |
| html = match.group(1) |
| if raw and self.safe_mode: |
| html = self._sanitize_html(html) |
| elif 'markdown-in-html' in self.extras and 'markdown=' in html: |
| first_line = html.split('\n', 1)[0] |
| m = self._html_markdown_attr_re.search(first_line) |
| if m: |
| lines = html.split('\n') |
| middle = '\n'.join(lines[1:-1]) |
| last_line = lines[-1] |
| first_line = first_line[:m.start()] + first_line[m.end():] |
| f_key = _hash_text(first_line) |
| self.html_blocks[f_key] = first_line |
| l_key = _hash_text(last_line) |
| self.html_blocks[l_key] = last_line |
| return ''.join(["\n\n", f_key, |
| "\n\n", middle, "\n\n", |
| l_key, "\n\n"]) |
| key = _hash_text(html) |
| self.html_blocks[key] = html |
| return "\n\n" + key + "\n\n" |
| |
| def _hash_html_blocks(self, text, raw=False): |
| """Hashify HTML blocks |
| |
| We only want to do this for block-level HTML tags, such as headers, |
| lists, and tables. That's because we still want to wrap <p>s around |
| "paragraphs" that are wrapped in non-block-level tags, such as anchors, |
| phrase emphasis, and spans. The list of tags we're looking for is |
| hard-coded. |
| |
| @param raw {boolean} indicates if these are raw HTML blocks in |
| the original source. It makes a difference in "safe" mode. |
| """ |
| if '<' not in text: |
| return text |
| |
| # Pass `raw` value into our calls to self._hash_html_block_sub. |
| hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw) |
| |
| # First, look for nested blocks, e.g.: |
| # <div> |
| # <div> |
| # tags for inner block must be indented. |
| # </div> |
| # </div> |
| # |
| # The outermost tags must start at the left margin for this to match, and |
| # the inner nested divs must be indented. |
| # We need to do this before the next, more liberal match, because the next |
| # match will start at the first `<div>` and stop at the first `</div>`. |
| text = self._strict_tag_block_re.sub(hash_html_block_sub, text) |
| |
| # Now match more liberally, simply from `\n<tag>` to `</tag>\n` |
| text = self._liberal_tag_block_re.sub(hash_html_block_sub, text) |
| |
| # Special case just for <hr />. It was easier to make a special |
| # case than to make the other regex more complicated. |
| if "<hr" in text: |
| _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width) |
| text = _hr_tag_re.sub(hash_html_block_sub, text) |
| |
| # Special case for standalone HTML comments: |
| if "<!--" in text: |
| start = 0 |
| while True: |
| # Delimiters for next comment block. |
| try: |
| start_idx = text.index("<!--", start) |
| except ValueError: |
| break |
| try: |
| end_idx = text.index("-->", start_idx) + 3 |
| except ValueError: |
| break |
| |
| # Start position for next comment block search. |
| start = end_idx |
| |
| # Validate whitespace before comment. |
| if start_idx: |
| # - Up to `tab_width - 1` spaces before start_idx. |
| for i in range(self.tab_width - 1): |
| if text[start_idx - 1] != ' ': |
| break |
| start_idx -= 1 |
| if start_idx == 0: |
| break |
| # - Must be preceded by 2 newlines or hit the start of |
| # the document. |
| if start_idx == 0: |
| pass |
| elif start_idx == 1 and text[0] == '\n': |
| start_idx = 0 # to match minute detail of Markdown.pl regex |
| elif text[start_idx-2:start_idx] == '\n\n': |
| pass |
| else: |
| break |
| |
| # Validate whitespace after comment. |
| # - Any number of spaces and tabs. |
| while end_idx < len(text): |
| if text[end_idx] not in ' \t': |
| break |
| end_idx += 1 |
                # - Must be followed by 2 newlines or hit end of text.
| if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'): |
| continue |
| |
| # Escape and hash (must match `_hash_html_block_sub`). |
| html = text[start_idx:end_idx] |
| if raw and self.safe_mode: |
| html = self._sanitize_html(html) |
| key = _hash_text(html) |
| self.html_blocks[key] = html |
| text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:] |
| |
| if "xml" in self.extras: |
| # Treat XML processing instructions and namespaced one-liner |
| # tags as if they were block HTML tags. E.g., if standalone |
| # (i.e. are their own paragraph), the following do not get |
| # wrapped in a <p> tag: |
| # <?foo bar?> |
| # |
| # <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/> |
| _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width) |
| text = _xml_oneliner_re.sub(hash_html_block_sub, text) |
| |
| return text |
| |
| def _strip_link_definitions(self, text): |
| # Strips link definitions from text, stores the URLs and titles in |
| # hash references. |
| less_than_tab = self.tab_width - 1 |
| |
| # Link defs are in the form: |
| # [id]: url "optional title" |
| _link_def_re = re.compile(r""" |
| ^[ ]{0,%d}\[(.+)\]: # id = \1 |
| [ \t]* |
| \n? # maybe *one* newline |
| [ \t]* |
| <?(.+?)>? # url = \2 |
| [ \t]* |
| (?: |
| \n? # maybe one newline |
| [ \t]* |
| (?<=\s) # lookbehind for whitespace |
| ['"(] |
| ([^\n]*) # title = \3 |
| ['")] |
| [ \t]* |
| )? # title is optional |
| (?:\n+|\Z) |
| """ % less_than_tab, re.X | re.M | re.U) |
| return _link_def_re.sub(self._extract_link_def_sub, text) |
| |
| def _extract_link_def_sub(self, match): |
| id, url, title = match.groups() |
| key = id.lower() # Link IDs are case-insensitive |
| self.urls[key] = self._encode_amps_and_angles(url) |
| if title: |
| self.titles[key] = title |
| return "" |
| |
| def _extract_footnote_def_sub(self, match): |
| id, text = match.groups() |
| text = _dedent(text, skip_first_line=not text.startswith('\n')).strip() |
| normed_id = re.sub(r'\W', '-', id) |
| # Ensure footnote text ends with a couple newlines (for some |
| # block gamut matches). |
| self.footnotes[normed_id] = text + "\n\n" |
| return "" |
| |
| def _strip_footnote_definitions(self, text): |
| """A footnote definition looks like this: |
| |
| [^note-id]: Text of the note. |
| |
| May include one or more indented paragraphs. |
| |
| Where, |
| - The 'note-id' can be pretty much anything, though typically it |
| is the number of the footnote. |
| - The first paragraph may start on the next line, like so: |
| |
| [^note-id]: |
| Text of the note. |
| """ |
| less_than_tab = self.tab_width - 1 |
| footnote_def_re = re.compile(r''' |
| ^[ ]{0,%d}\[\^(.+)\]: # id = \1 |
| [ \t]* |
| ( # footnote text = \2 |
| # First line need not start with the spaces. |
| (?:\s*.*\n+) |
| (?: |
| (?:[ ]{%d} | \t) # Subsequent lines must be indented. |
| .*\n+ |
| )* |
| ) |
| # Lookahead for non-space at line-start, or end of doc. |
| (?:(?=^[ ]{0,%d}\S)|\Z) |
| ''' % (less_than_tab, self.tab_width, self.tab_width), |
| re.X | re.M) |
| return footnote_def_re.sub(self._extract_footnote_def_sub, text) |
| |
| _hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M) |
| |
| def _run_block_gamut(self, text): |
| # These are all the transformations that form block-level |
| # tags like paragraphs, headers, and list items. |
| |
| if "fenced-code-blocks" in self.extras: |
| text = self._do_fenced_code_blocks(text) |
| |
| text = self._do_headers(text) |
| |
| # Do Horizontal Rules: |
| # On the number of spaces in horizontal rules: The spec is fuzzy: "If |
| # you wish, you may use spaces between the hyphens or asterisks." |
| # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the |
| # hr chars to one or two. We'll reproduce that limit here. |
| hr = "\n<hr"+self.empty_element_suffix+"\n" |
| text = re.sub(self._hr_re, hr, text) |
| |
| text = self._do_lists(text) |
| |
| if "pyshell" in self.extras: |
| text = self._prepare_pyshell_blocks(text) |
| if "wiki-tables" in self.extras: |
| text = self._do_wiki_tables(text) |
| if "tables" in self.extras: |
| text = self._do_tables(text) |
| |
| text = self._do_code_blocks(text) |
| |
| text = self._do_block_quotes(text) |
| |
| # We already ran _HashHTMLBlocks() before, in Markdown(), but that |
| # was to escape raw HTML in the original Markdown source. This time, |
| # we're escaping the markup we've just created, so that we don't wrap |
| # <p> tags around block-level tags. |
| text = self._hash_html_blocks(text) |
| |
| text = self._form_paragraphs(text) |
| |
| return text |
| |
| def _pyshell_block_sub(self, match): |
| lines = match.group(0).splitlines(0) |
| _dedentlines(lines) |
| indent = ' ' * self.tab_width |
| s = ('\n' # separate from possible cuddled paragraph |
| + indent + ('\n'+indent).join(lines) |
| + '\n\n') |
| return s |
| |
| def _prepare_pyshell_blocks(self, text): |
| """Ensure that Python interactive shell sessions are put in |
| code blocks -- even if not properly indented. |
| """ |
| if ">>>" not in text: |
| return text |
| |
| less_than_tab = self.tab_width - 1 |
| _pyshell_block_re = re.compile(r""" |
| ^([ ]{0,%d})>>>[ ].*\n # first line |
| ^(\1.*\S+.*\n)* # any number of subsequent lines |
| ^\n # ends with a blank line |
| """ % less_than_tab, re.M | re.X) |
| |
| return _pyshell_block_re.sub(self._pyshell_block_sub, text) |
| |
| def _table_sub(self, match): |
| head, underline, body = match.groups() |
| |
| # Determine aligns for columns. |
| cols = [cell.strip() for cell in underline.strip('| \t\n').split('|')] |
| align_from_col_idx = {} |
| for col_idx, col in enumerate(cols): |
| if col[0] == ':' and col[-1] == ':': |
| align_from_col_idx[col_idx] = ' align="center"' |
| elif col[0] == ':': |
| align_from_col_idx[col_idx] = ' align="left"' |
| elif col[-1] == ':': |
| align_from_col_idx[col_idx] = ' align="right"' |
| |
| # thead |
| hlines = ['<table>', '<thead>', '<tr>'] |
| cols = [cell.strip() for cell in head.strip('| \t\n').split('|')] |
| for col_idx, col in enumerate(cols): |
| hlines.append(' <th%s>%s</th>' % ( |
| align_from_col_idx.get(col_idx, ''), |
| self._run_span_gamut(col) |
| )) |
| hlines.append('</tr>') |
| hlines.append('</thead>') |
| |
| # tbody |
| hlines.append('<tbody>') |
| for line in body.strip('\n').split('\n'): |
| hlines.append('<tr>') |
| cols = [cell.strip() for cell in line.strip('| \t\n').split('|')] |
| for col_idx, col in enumerate(cols): |
| hlines.append(' <td%s>%s</td>' % ( |
| align_from_col_idx.get(col_idx, ''), |
| self._run_span_gamut(col) |
| )) |
| hlines.append('</tr>') |
| hlines.append('</tbody>') |
| hlines.append('</table>') |
| |
| return '\n'.join(hlines) + '\n' |
| |
| def _do_tables(self, text): |
| """Copying PHP-Markdown and GFM table syntax. Some regex borrowed from |
| https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538 |
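
        An illustrative input (GFM style):

            | Header A | Header B |
            |:---------|---------:|
            | a1       | b1       |

        becomes a <table> with <thead>/<tbody> rows; column alignment is
        taken from the ':' markers in the underline row.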
| """ |
| less_than_tab = self.tab_width - 1 |
| table_re = re.compile(r''' |
| (?:(?<=\n\n)|\A\n?) # leading blank line |
| |
| ^[ ]{0,%d} # allowed whitespace |
| (.*[|].*) \n # $1: header row (at least one pipe) |
| |
| ^[ ]{0,%d} # allowed whitespace |
| ( # $2: underline row |
| # underline row with leading bar |
| (?: \|\ *:?-+:?\ * )+ \|? \n |
| | |
| # or, underline row without leading bar |
| (?: \ *:?-+:?\ *\| )+ (?: \ *:?-+:?\ * )? \n |
| ) |
| |
| ( # $3: data rows |
| (?: |
| ^[ ]{0,%d}(?!\ ) # ensure line begins with 0 to less_than_tab spaces |
| .*\|.* \n |
| )+ |
| ) |
| ''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X) |
| return table_re.sub(self._table_sub, text) |
| |
| def _wiki_table_sub(self, match): |
| ttext = match.group(0).strip() |
| #print 'wiki table: %r' % match.group(0) |
| rows = [] |
| for line in ttext.splitlines(0): |
| line = line.strip()[2:-2].strip() |
| row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)] |
| rows.append(row) |
| #pprint(rows) |
| hlines = ['<table>', '<tbody>'] |
| for row in rows: |
| hrow = ['<tr>'] |
| for cell in row: |
| hrow.append('<td>') |
| hrow.append(self._run_span_gamut(cell)) |
| hrow.append('</td>') |
| hrow.append('</tr>') |
| hlines.append(''.join(hrow)) |
| hlines += ['</tbody>', '</table>'] |
| return '\n'.join(hlines) + '\n' |
| |
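    # A wiki-style table (per the Google Code wiki syntax linked in the
    # module help) looks like this illustrative sketch:
    #   || header 1 || header 2 ||
    #   || cell a1  || cell b1  ||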
| def _do_wiki_tables(self, text): |
| # Optimization. |
| if "||" not in text: |
| return text |
| |
| less_than_tab = self.tab_width - 1 |
| wiki_table_re = re.compile(r''' |
| (?:(?<=\n\n)|\A\n?) # leading blank line |
| ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line |
| (^\1\|\|.+?\|\|\n)* # any number of subsequent lines |
| ''' % less_than_tab, re.M | re.X) |
| return wiki_table_re.sub(self._wiki_table_sub, text) |
| |
| def _run_span_gamut(self, text): |
| # These are all the transformations that occur *within* block-level |
| # tags like paragraphs, headers, and list items. |
| |
| text = self._do_code_spans(text) |
| |
| text = self._escape_special_chars(text) |
| |
| # Process anchor and image tags. |
| text = self._do_links(text) |
| |
| # Make links out of things like `<http://example.com/>` |
| # Must come after _do_links(), because you can use < and > |
| # delimiters in inline links like [this](<url>). |
| text = self._do_auto_links(text) |
| |
| if "link-patterns" in self.extras: |
| text = self._do_link_patterns(text) |
| |
| text = self._encode_amps_and_angles(text) |
| |
| text = self._do_italics_and_bold(text) |
| |
| if "smarty-pants" in self.extras: |
| text = self._do_smart_punctuation(text) |
| |
| # Do hard breaks: |
| if "break-on-newline" in self.extras: |
| text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text) |
| else: |
| text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text) |
| |
| return text |
| |
| # "Sorta" because auto-links are identified as "tag" tokens. |
| _sorta_html_tokenize_re = re.compile(r""" |
| ( |
| # tag |
| </? |
| (?:\w+) # tag name |
| (?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes |
| \s*/?> |
| | |
| # auto-link (e.g., <http://www.activestate.com/>) |
| <\w+[^>]*> |
| | |
| <!--.*?--> # comment |
| | |
| <\?.*?\?> # processing instruction |
| ) |
| """, re.X) |
| |
| def _escape_special_chars(self, text): |
| # Python markdown note: the HTML tokenization here differs from |
| # that in Markdown.pl, hence the behaviour for subtle cases can |
| # differ (I believe the tokenizer here does a better job because |
| # it isn't susceptible to unmatched '<' and '>' in HTML tags). |
| # Note, however, that '>' is not allowed in an auto-link URL |
| # here. |
| escaped = [] |
| is_html_markup = False |
| for token in self._sorta_html_tokenize_re.split(text): |
| if is_html_markup: |
| # Within tags/HTML-comments/auto-links, encode * and _ |
| # so they don't conflict with their use in Markdown for |
| # italics and strong. We're replacing each such |
| # character with its corresponding MD5 checksum value; |
| # this is likely overkill, but it should prevent us from |
| # colliding with the escape values by accident. |
| escaped.append(token.replace('*', self._escape_table['*']) |
| .replace('_', self._escape_table['_'])) |
| else: |
| escaped.append(self._encode_backslash_escapes(token)) |
| is_html_markup = not is_html_markup |
| return ''.join(escaped) |
| |
| def _hash_html_spans(self, text): |
| # Used for safe_mode. |
| |
| def _is_auto_link(s): |
| if ':' in s and self._auto_link_re.match(s): |
| return True |
| elif '@' in s and self._auto_email_link_re.match(s): |
| return True |
| return False |
| |
| tokens = [] |
| is_html_markup = False |
| for token in self._sorta_html_tokenize_re.split(text): |
| if is_html_markup and not _is_auto_link(token): |
| sanitized = self._sanitize_html(token) |
| key = _hash_text(sanitized) |
| self.html_spans[key] = sanitized |
| tokens.append(key) |
| else: |
| tokens.append(token) |
| is_html_markup = not is_html_markup |
| return ''.join(tokens) |
| |
| def _unhash_html_spans(self, text): |
| for key, sanitized in list(self.html_spans.items()): |
| text = text.replace(key, sanitized) |
| return text |
| |
| def _sanitize_html(self, s): |
| if self.safe_mode == "replace": |
| return self.html_removed_text |
| elif self.safe_mode == "escape": |
| replacements = [ |
| ('&', '&'), |
| ('<', '<'), |
| ('>', '>'), |
| ] |
| for before, after in replacements: |
| s = s.replace(before, after) |
| return s |
| else: |
| raise MarkdownError("invalid value for 'safe_mode': %r (must be " |
| "'escape' or 'replace')" % self.safe_mode) |
| |
| _inline_link_title = re.compile(r''' |
| ( # \1 |
| [ \t]+ |
| (['"]) # quote char = \2 |
| (?P<title>.*?) |
| \2 |
| )? # title is optional |
| \)$ |
| ''', re.X | re.S) |
| _tail_of_reference_link_re = re.compile(r''' |
| # Match tail of: [text][id] |
| [ ]? # one optional space |
| (?:\n[ ]*)? # one optional newline followed by spaces |
| \[ |
| (?P<id>.*?) |
| \] |
| ''', re.X | re.S) |
| |
| _whitespace = re.compile(r'\s*') |
| |
| _strip_anglebrackets = re.compile(r'<(.*)>.*') |
| |
| def _find_non_whitespace(self, text, start): |
| """Returns the index of the first non-whitespace character in text |
| after (and including) start |
| """ |
| match = self._whitespace.match(text, start) |
| return match.end() |
| |
| def _find_balanced(self, text, start, open_c, close_c): |
| """Returns the index where the open_c and close_c characters balance |
| out - the same number of open_c and close_c are encountered - or the |
| end of string if it's reached before the balance point is found. |
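
        For example, _find_balanced("(a(b)) tail", 1, "(", ")") returns 6,
        the index just past the ')' that balances the '(' assumed to sit
        just before `start`.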
| """ |
| i = start |
| l = len(text) |
| count = 1 |
| while count > 0 and i < l: |
| if text[i] == open_c: |
| count += 1 |
| elif text[i] == close_c: |
| count -= 1 |
| i += 1 |
| return i |
| |
| def _extract_url_and_title(self, text, start): |
| """Extracts the url and (optional) title from the tail of a link""" |
| # text[start] equals the opening parenthesis |
| idx = self._find_non_whitespace(text, start+1) |
| if idx == len(text): |
| return None, None, None |
| end_idx = idx |
| has_anglebrackets = text[idx] == "<" |
| if has_anglebrackets: |
| end_idx = self._find_balanced(text, end_idx+1, "<", ">") |
| end_idx = self._find_balanced(text, end_idx, "(", ")") |
| match = self._inline_link_title.search(text, idx, end_idx) |
| if not match: |
| return None, None, None |
| url, title = text[idx:match.start()], match.group("title") |
| if has_anglebrackets: |
| url = self._strip_anglebrackets.sub(r'\1', url) |
| return url, title, end_idx |
| |
| def _do_links(self, text): |
| """Turn Markdown link shortcuts into XHTML <a> and <img> tags. |
| |
| This is a combination of Markdown.pl's _DoAnchors() and |
| _DoImages(). They are done together because that simplified the |
        approach. It was necessary to use a different approach from
        Markdown.pl because Python's regex engine lacks the atomic matching
        support that Markdown.pl's $g_nested_brackets relies on.
| """ |
| MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24 |
| |
| # `anchor_allowed_pos` is used to support img links inside |
| # anchors, but not anchors inside anchors. An anchor's start |
| # pos must be `>= anchor_allowed_pos`. |
| anchor_allowed_pos = 0 |
| |
| curr_pos = 0 |
| while True: # Handle the next link. |
| # The next '[' is the start of: |
| # - an inline anchor: [text](url "title") |
| # - a reference anchor: [text][id] |
| # - an inline img: ![text](url "title") |
| # - a reference img: ![text][id] |
| # - a footnote ref: [^id] |
| # (Only if 'footnotes' extra enabled) |
| # - a footnote defn: [^id]: ... |
| # (Only if 'footnotes' extra enabled) These have already |
| # been stripped in _strip_footnote_definitions() so no |
| # need to watch for them. |
| # - a link definition: [id]: url "title" |
| # These have already been stripped in |
| # _strip_link_definitions() so no need to watch for them. |
| # - not markup: [...anything else... |
| try: |
| start_idx = text.index('[', curr_pos) |
| except ValueError: |
| break |
| text_length = len(text) |
| |
| # Find the matching closing ']'. |
| # Markdown.pl allows *matching* brackets in link text so we |
| # will here too. Markdown.pl *doesn't* currently allow |
| # matching brackets in img alt text -- we'll differ in that |
| # regard. |
| bracket_depth = 0 |
| for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, |
| text_length)): |
| ch = text[p] |
| if ch == ']': |
| bracket_depth -= 1 |
| if bracket_depth < 0: |
| break |
| elif ch == '[': |
| bracket_depth += 1 |
| else: |
| # Closing bracket not found within sentinel length. |
| # This isn't markup. |
| curr_pos = start_idx + 1 |
| continue |
| link_text = text[start_idx+1:p] |
| |
| # Possibly a footnote ref? |
| if "footnotes" in self.extras and link_text.startswith("^"): |
| normed_id = re.sub(r'\W', '-', link_text[1:]) |
| if normed_id in self.footnotes: |
| self.footnote_ids.append(normed_id) |
| result = '<sup class="footnote-ref" id="fnref-%s">' \ |
| '<a href="#fn-%s">%s</a></sup>' \ |
| % (normed_id, normed_id, len(self.footnote_ids)) |
| text = text[:start_idx] + result + text[p+1:] |
| else: |
| # This id isn't defined, leave the markup alone. |
| curr_pos = p+1 |
| continue |
| |
| # Now determine what this is by the remainder. |
| p += 1 |
| if p == text_length: |
| return text |
| |
| # Inline anchor or img? |
| if text[p] == '(': # attempt at perf improvement |
| url, title, url_end_idx = self._extract_url_and_title(text, p) |
| if url is not None: |
| # Handle an inline anchor or img. |
| is_img = start_idx > 0 and text[start_idx-1] == "!" |
| if is_img: |
| start_idx -= 1 |
| |
| # We've got to encode these to avoid conflicting |
| # with italics/bold. |
| url = url.replace('*', self._escape_table['*']) \ |
| .replace('_', self._escape_table['_']) |
| if title: |
| title_str = ' title="%s"' % ( |
| _xml_escape_attr(title) |
| .replace('*', self._escape_table['*']) |
| .replace('_', self._escape_table['_'])) |
| else: |
| title_str = '' |
| if is_img: |
| img_class_str = self._html_class_str_from_tag("img") |
| result = '<img src="%s" alt="%s"%s%s%s' \ |
| % (url.replace('"', '"'), |
| _xml_escape_attr(link_text), |
| title_str, img_class_str, self.empty_element_suffix) |
| if "smarty-pants" in self.extras: |
| result = result.replace('"', self._escape_table['"']) |
| curr_pos = start_idx + len(result) |
| text = text[:start_idx] + result + text[url_end_idx:] |
| elif start_idx >= anchor_allowed_pos: |
| result_head = '<a href="%s"%s>' % (url, title_str) |
| result = '%s%s</a>' % (result_head, link_text) |
| if "smarty-pants" in self.extras: |
| result = result.replace('"', self._escape_table['"']) |
| # <img> allowed from curr_pos on, <a> from |
| # anchor_allowed_pos on. |
| curr_pos = start_idx + len(result_head) |
| anchor_allowed_pos = start_idx + len(result) |
| text = text[:start_idx] + result + text[url_end_idx:] |
| else: |
| # Anchor not allowed here. |
| curr_pos = start_idx + 1 |
| continue |
| |
| # Reference anchor or img? |
| else: |
| match = self._tail_of_reference_link_re.match(text, p) |
| if match: |
| # Handle a reference-style anchor or img. |
| is_img = start_idx > 0 and text[start_idx-1] == "!" |
| if is_img: |
| start_idx -= 1 |
| link_id = match.group("id").lower() |
| if not link_id: |
| link_id = link_text.lower() # for links like [this][] |
| if link_id in self.urls: |
| url = self.urls[link_id] |
| # We've got to encode these to avoid conflicting |
| # with italics/bold. |
| url = url.replace('*', self._escape_table['*']) \ |
| .replace('_', self._escape_table['_']) |
| title = self.titles.get(link_id) |
| if title: |
| title = _xml_escape_attr(title) \ |
| .replace('*', self._escape_table['*']) \ |
| .replace('_', self._escape_table['_']) |
| title_str = ' title="%s"' % title |
| else: |
| title_str = '' |
| if is_img: |
| img_class_str = self._html_class_str_from_tag("img") |
| result = '<img src="%s" alt="%s"%s%s%s' \ |
| % (url.replace('"', '"'), |
| link_text.replace('"', '"'), |
| title_str, img_class_str, self.empty_element_suffix) |
| if "smarty-pants" in self.extras: |
| result = result.replace('"', self._escape_table['"']) |
| curr_pos = start_idx + len(result) |
| text = text[:start_idx] + result + text[match.end():] |
| elif start_idx >= anchor_allowed_pos: |
| result_head = '<a href="%s"%s>' % (url, title_str) |
| result = '%s%s</a>' % (result_head, link_text) |
| if "smarty-pants" in self.extras: |
| result = result.replace('"', self._escape_table['"']) |
| # <img> allowed from curr_pos on, <a> from |
| # anchor_allowed_pos on. |
| curr_pos = start_idx + len(result_head) |
| anchor_allowed_pos = start_idx + len(result) |
| text = text[:start_idx] + result + text[match.end():] |
| else: |
| # Anchor not allowed here. |
| curr_pos = start_idx + 1 |
| else: |
| # This id isn't defined, leave the markup alone. |
| curr_pos = match.end() |
| continue |
| |
| # Otherwise, it isn't markup. |
| curr_pos = start_idx + 1 |
| |
| return text |
| |
| def header_id_from_text(self, text, prefix, n): |
| """Generate a header id attribute value from the given header |
| HTML content. |
| |
| This is only called if the "header-ids" extra is enabled. |
| Subclasses may override this for different header ids. |
| |
| @param text {str} The text of the header tag |
| @param prefix {str} The requested prefix for header ids. This is the |
| value of the "header-ids" extra key, if any. Otherwise, None. |
| @param n {int} The <hN> tag number, i.e. `1` for an <h1> tag. |
| @returns {str} The value for the header tag's "id" attribute. Return |
| None to not have an id attribute and to exclude this header from |
| the TOC (if the "toc" extra is specified). |
| """ |
| header_id = _slugify(text) |
| if prefix and isinstance(prefix, base_string_type): |
| header_id = prefix + '-' + header_id |
| if header_id in self._count_from_header_id: |
| self._count_from_header_id[header_id] += 1 |
| header_id += '-%s' % self._count_from_header_id[header_id] |
| else: |
| self._count_from_header_id[header_id] = 1 |
| return header_id |
| |
| _toc = None |
| def _toc_add_entry(self, level, id, name): |
| if self._toc is None: |
| self._toc = [] |
| self._toc.append((level, id, self._unescape_special_chars(name))) |
| |
| _h_re_base = r''' |
| (^(.+)[ \t]*\n(=+|-+)[ \t]*\n+) |
| | |
| (^(\#{1,6}) # \1 = string of #'s |
| [ \t]%s |
| (.+?) # \2 = Header text |
| [ \t]* |
| (?<!\\) # ensure not an escaped trailing '#' |
| \#* # optional closing #'s (not counted) |
| \n+ |
| ) |
| ''' |
| |
| _h_re = re.compile(_h_re_base % '*', re.X | re.M) |
| _h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M) |
| |
| def _h_sub(self, match): |
| if match.group(1) is not None: |
| # Setext header |
| n = {"=": 1, "-": 2}[match.group(3)[0]] |
| header_group = match.group(2) |
| else: |
| # atx header |
| n = len(match.group(5)) |
| header_group = match.group(6) |
| |
| demote_headers = self.extras.get("demote-headers") |
| if demote_headers: |
| n = min(n + demote_headers, 6) |
| header_id_attr = "" |
| if "header-ids" in self.extras: |
| header_id = self.header_id_from_text(header_group, |
| self.extras["header-ids"], n) |
| if header_id: |
| header_id_attr = ' id="%s"' % header_id |
| html = self._run_span_gamut(header_group) |
| if "toc" in self.extras and header_id: |
| self._toc_add_entry(n, header_id, html) |
| return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n) |
| |
| def _do_headers(self, text): |
| # Setext-style headers: |
| # Header 1 |
| # ======== |
| # |
| # Header 2 |
| # -------- |
| |
| # atx-style headers: |
| # # Header 1 |
| # ## Header 2 |
| # ## Header 2 with closing hashes ## |
| # ... |
| # ###### Header 6 |
| |
| if 'tag-friendly' in self.extras: |
| return self._h_re_tag_friendly.sub(self._h_sub, text) |
| return self._h_re.sub(self._h_sub, text) |
| |
| _marker_ul_chars = '*+-' |
| _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars |
| _marker_ul = '(?:[%s])' % _marker_ul_chars |
| _marker_ol = r'(?:\d+\.)' |
| |
| def _list_sub(self, match): |
| lst = match.group(1) |
| lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol" |
| result = self._process_list_items(lst) |
| if self.list_level: |
| return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type) |
| else: |
| return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type) |
| |
| def _do_lists(self, text): |
| # Form HTML ordered (numbered) and unordered (bulleted) lists. |
| |
| # Iterate over each *non-overlapping* list match. |
| pos = 0 |
| while True: |
| # Find the *first* hit for either list style (ul or ol). We |
| # match ul and ol separately to avoid adjacent lists of different |
| # types running into each other (see issue #16). |
| hits = [] |
| for marker_pat in (self._marker_ul, self._marker_ol): |
| less_than_tab = self.tab_width - 1 |
| whole_list = r''' |
| ( # \1 = whole list |
| ( # \2 |
| [ ]{0,%d} |
| (%s) # \3 = first list item marker |
| [ \t]+ |
| (?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case. |
| ) |
| (?:.+?) |
| ( # \4 |
| \Z |
| | |
| \n{2,} |
| (?=\S) |
| (?! # Negative lookahead for another list item marker |
| [ \t]* |
| %s[ \t]+ |
| ) |
| ) |
| ) |
| ''' % (less_than_tab, marker_pat, marker_pat) |
| if self.list_level: # sub-list |
| list_re = re.compile("^"+whole_list, re.X | re.M | re.S) |
| else: |
| list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list, |
| re.X | re.M | re.S) |
| match = list_re.search(text, pos) |
| if match: |
| hits.append((match.start(), match)) |
| if not hits: |
| break |
| hits.sort() |
| match = hits[0][1] |
| start, end = match.span() |
| middle = self._list_sub(match) |
| text = text[:start] + middle + text[end:] |
| pos = start + len(middle) # start pos for next attempted match |
| |
| return text |
| |
| _list_item_re = re.compile(r''' |
| (\n)? # leading line = \1 |
| (^[ \t]*) # leading whitespace = \2 |
| (?P<marker>%s) [ \t]+ # list marker = \3 |
| ((?:.+?) # list item text = \4 |
| (\n{1,2})) # eols = \5 |
| (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+)) |
| ''' % (_marker_any, _marker_any), |
| re.M | re.X | re.S) |
| |
| _last_li_endswith_two_eols = False |
| def _list_item_sub(self, match): |
| item = match.group(4) |
| leading_line = match.group(1) |
| leading_space = match.group(2) |
| if leading_line or "\n\n" in item or self._last_li_endswith_two_eols: |
| item = self._run_block_gamut(self._outdent(item)) |
| else: |
| # Recursion for sub-lists: |
| item = self._do_lists(self._outdent(item)) |
| if item.endswith('\n'): |
| item = item[:-1] |
| item = self._run_span_gamut(item) |
| self._last_li_endswith_two_eols = (len(match.group(5)) == 2) |
| return "<li>%s</li>\n" % item |
| |
| def _process_list_items(self, list_str): |
| # Process the contents of a single ordered or unordered list, |
| # splitting it into individual list items. |
| |
| # The $g_list_level global keeps track of when we're inside a list. |
| # Each time we enter a list, we increment it; when we leave a list, |
| # we decrement. If it's zero, we're not in a list anymore. |
| # |
| # We do this because when we're not inside a list, we want to treat |
| # something like this: |
| # |
| # I recommend upgrading to version |
| # 8. Oops, now this line is treated |
| # as a sub-list. |
| # |
| # As a single paragraph, despite the fact that the second line starts |
| # with a digit-period-space sequence. |
| # |
| # Whereas when we're inside a list (or sub-list), that line will be |
| # treated as the start of a sub-list. What a kludge, huh? This is |
| # an aspect of Markdown's syntax that's hard to parse perfectly |
| # without resorting to mind-reading. Perhaps the solution is to |
| # change the syntax rules such that sub-lists must start with a |
| # starting cardinal number; e.g. "1." or "a.". |
| self.list_level += 1 |
| self._last_li_endswith_two_eols = False |
| list_str = list_str.rstrip('\n') + '\n' |
| list_str = self._list_item_re.sub(self._list_item_sub, list_str) |
| self.list_level -= 1 |
| return list_str |
| |
| def _get_pygments_lexer(self, lexer_name): |
| try: |
| from pygments import lexers, util |
| except ImportError: |
| return None |
| try: |
| return lexers.get_lexer_by_name(lexer_name) |
| except util.ClassNotFound: |
| return None |
| |
| def _color_with_pygments(self, codeblock, lexer, **formatter_opts): |
| import pygments |
| import pygments.formatters |
| |
| class HtmlCodeFormatter(pygments.formatters.HtmlFormatter): |
| def _wrap_code(self, inner): |
| """A function for use in a Pygments Formatter which |
| wraps in <code> tags. |
| """ |
| yield 0, "<code>" |
| for tup in inner: |
| yield tup |
| yield 0, "</code>" |
| |
| def wrap(self, source, outfile): |
| """Return the source with a code, pre, and div.""" |
| return self._wrap_div(self._wrap_pre(self._wrap_code(source))) |
| |
| formatter_opts.setdefault("cssclass", "codehilite") |
| formatter = HtmlCodeFormatter(**formatter_opts) |
| return pygments.highlight(codeblock, lexer, formatter) |
| |
| def _code_block_sub(self, match, is_fenced_code_block=False): |
| lexer_name = None |
| if is_fenced_code_block: |
| lexer_name = match.group(1) |
| if lexer_name: |
| formatter_opts = self.extras['fenced-code-blocks'] or {} |
| codeblock = match.group(2) |
| codeblock = codeblock[:-1] # drop one trailing newline |
| else: |
| codeblock = match.group(1) |
| codeblock = self._outdent(codeblock) |
| codeblock = self._detab(codeblock) |
| codeblock = codeblock.lstrip('\n') # trim leading newlines |
| codeblock = codeblock.rstrip() # trim trailing whitespace |
| |
| # Note: "code-color" extra is DEPRECATED. |
| if "code-color" in self.extras and codeblock.startswith(":::"): |
| lexer_name, rest = codeblock.split('\n', 1) |
| lexer_name = lexer_name[3:].strip() |
| codeblock = rest.lstrip("\n") # Remove lexer declaration line. |
| formatter_opts = self.extras['code-color'] or {} |
| |
| if lexer_name: |
| def unhash_code( codeblock ): |
| for key, sanitized in list(self.html_spans.items()): |
| codeblock = codeblock.replace(key, sanitized) |
| replacements = [ |
| ("&", "&"), |
| ("<", "<"), |
| (">", ">") |
| ] |
| for old, new in replacements: |
| codeblock = codeblock.replace(old, new) |
| return codeblock |
| lexer = self._get_pygments_lexer(lexer_name) |
| if lexer: |
| codeblock = unhash_code( codeblock ) |
| colored = self._color_with_pygments(codeblock, lexer, |
| **formatter_opts) |
| return "\n\n%s\n\n" % colored |
| |
| codeblock = self._encode_code(codeblock) |
| pre_class_str = self._html_class_str_from_tag("pre") |
| code_class_str = self._html_class_str_from_tag("code") |
| return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % ( |
| pre_class_str, code_class_str, codeblock) |
| |
| def _html_class_str_from_tag(self, tag): |
| """Get the appropriate ' class="..."' string (note the leading |
| space), if any, for the given tag. |
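
        For example, with extras={"html-classes": {"pre": "prettyprint"}}
        this returns ' class="prettyprint"' for tag "pre" and "" for any
        other tag.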
| """ |
| if "html-classes" not in self.extras: |
| return "" |
| try: |
| html_classes_from_tag = self.extras["html-classes"] |
| except TypeError: |
| return "" |
| else: |
| if tag in html_classes_from_tag: |
| return ' class="%s"' % html_classes_from_tag[tag] |
| return "" |
| |
| def _do_code_blocks(self, text): |
| """Process Markdown `<pre><code>` blocks.""" |
| code_block_re = re.compile(r''' |
| (?:\n\n|\A\n?) |
| ( # $1 = the code block -- one or more lines, starting with a space/tab |
| (?: |
| (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces |
| .*\n+ |
| )+ |
| ) |
| ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc |
| # Lookahead to make sure this block isn't already in a code block. |
| # Needed when syntax highlighting is being used. |
| (?![^<]*\</code\>) |
| ''' % (self.tab_width, self.tab_width), |
| re.M | re.X) |
| return code_block_re.sub(self._code_block_sub, text) |
| |
| _fenced_code_block_re = re.compile(r''' |
| (?:\n\n|\A\n?) |
| ^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang |
| (.*?) # $2 = code block content |
| ^```[ \t]*\n # closing fence |
| ''', re.M | re.X | re.S) |
| |
| def _fenced_code_block_sub(self, match): |
        return self._code_block_sub(match, is_fenced_code_block=True)
| |
| def _do_fenced_code_blocks(self, text): |
| """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra).""" |
| return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text) |
| |
| # Rules for a code span: |
| # - backslash escapes are not interpreted in a code span |
    # - to include a literal backtick or a run of backticks, the delimiters
    #   must be a longer run of backticks
| # - cannot start or end a code span with a backtick; pad with a |
| # space and that space will be removed in the emitted HTML |
| # See `test/tm-cases/escapes.text` for a number of edge-case |
| # examples. |
| _code_span_re = re.compile(r''' |
| (?<!\\) |
| (`+) # \1 = Opening run of ` |
| (?!`) # See Note A test/tm-cases/escapes.text |
| (.+?) # \2 = The code block |
| (?<!`) |
| \1 # Matching closer |
| (?!`) |
| ''', re.X | re.S) |
| |
| def _code_span_sub(self, match): |
| c = match.group(2).strip(" \t") |
| c = self._encode_code(c) |
| return "<code>%s</code>" % c |
| |
| def _do_code_spans(self, text): |
| # * Backtick quotes are used for <code></code> spans. |
| # |
| # * You can use multiple backticks as the delimiters if you want to |
| # include literal backticks in the code span. So, this input: |
| # |
| # Just type ``foo `bar` baz`` at the prompt. |
| # |
| # Will translate to: |
| # |
| # <p>Just type <code>foo `bar` baz</code> at the prompt.</p> |
| # |
| # There's no arbitrary limit to the number of backticks you |
|         #     can use as delimiters. If you need three consecutive backticks |
| # in your code, use four for delimiters, etc. |
| # |
| # * You can use spaces to get literal backticks at the edges: |
| # |
| # ... type `` `bar` `` ... |
| # |
| # Turns to: |
| # |
| # ... type <code>`bar`</code> ... |
| return self._code_span_re.sub(self._code_span_sub, text) |
| |
| def _encode_code(self, text): |
| """Encode/escape certain characters inside Markdown code runs. |
| The point is that in code, these characters are literals, |
| and lose their special Markdown meanings. |
| """ |
| replacements = [ |
| # Encode all ampersands; HTML entities are not |
| # entities within a Markdown code span. |
|             ('&', '&amp;'), |
|             # Do the angle bracket song and dance: |
|             ('<', '&lt;'), |
|             ('>', '&gt;'), |
| ] |
| for before, after in replacements: |
| text = text.replace(before, after) |
| hashed = _hash_text(text) |
| self._escape_table[text] = hashed |
| return hashed |
| |
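|     # Note on _encode_code above: it does not return the escaped text |
|     # directly; it returns an opaque hash and records the escaped text in |
|     # self._escape_table, so later span/block processing cannot touch the |
|     # code's contents. _unescape_special_chars() swaps the real text back |
|     # in at the end of conversion. |
| |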
| _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S) |
| _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S) |
| _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S) |
| _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S) |
| def _do_italics_and_bold(self, text): |
| # <strong> must go first: |
| if "code-friendly" in self.extras: |
| text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text) |
| text = self._code_friendly_em_re.sub(r"<em>\1</em>", text) |
| else: |
| text = self._strong_re.sub(r"<strong>\2</strong>", text) |
| text = self._em_re.sub(r"<em>\2</em>", text) |
| return text |
| |
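|     # E.g. (a rough sketch of the two modes): |
|     # |
|     #   >>> markdown("__boom__") |
|     #   u'<p><strong>boom</strong></p>\n' |
|     #   >>> markdown("__boom__", extras=["code-friendly"]) |
|     #   u'<p>__boom__</p>\n' |
|     # |
|     # i.e. with "code-friendly" only '*' is significant, so identifiers |
|     # like some__name keep their underscores. |
| |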
| # "smarty-pants" extra: Very liberal in interpreting a single prime as an |
| # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and |
| # "twixt" can be written without an initial apostrophe. This is fine because |
| # using scare quotes (single quotation marks) is rare. |
| _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))") |
| _contractions = ["tis", "twas", "twer", "neath", "o", "n", |
| "round", "bout", "twixt", "nuff", "fraid", "sup"] |
| def _do_smart_contractions(self, text): |
|         text = self._apostrophe_year_re.sub(r"&#8217;\1", text) |
|         for c in self._contractions: |
|             text = text.replace("'%s" % c, "&#8217;%s" % c) |
|             text = text.replace("'%s" % c.capitalize(), |
|                 "&#8217;%s" % c.capitalize()) |
| return text |
| |
| # Substitute double-quotes before single-quotes. |
| _opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)") |
| _opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)') |
| _closing_single_quote_re = re.compile(r"(?<=\S)'") |
| _closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))') |
| def _do_smart_punctuation(self, text): |
| """Fancifies 'single quotes', "double quotes", and apostrophes. |
| Converts --, ---, and ... into en dashes, em dashes, and ellipses. |
| |
| Inspiration is: <http://daringfireball.net/projects/smartypants/> |
| See "test/tm-cases/smarty_pants.text" for a full discussion of the |
| support here and |
| <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a |
| discussion of some diversion from the original SmartyPants. |
| """ |
| if "'" in text: # guard for perf |
| text = self._do_smart_contractions(text) |
|             text = self._opening_single_quote_re.sub("&#8216;", text) |
|             text = self._closing_single_quote_re.sub("&#8217;", text) |
| |
|         if '"' in text: # guard for perf |
|             text = self._opening_double_quote_re.sub("&#8220;", text) |
|             text = self._closing_double_quote_re.sub("&#8221;", text) |
| |
|         text = text.replace("---", "&#8212;") |
|         text = text.replace("--", "&#8211;") |
|         text = text.replace("...", "&#8230;") |
|         text = text.replace(" . . . ", "&#8230;") |
|         text = text.replace(". . .", "&#8230;") |
| return text |
| |
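|     # Rough example of the "smarty-pants" extra handled above: given |
|     # |
|     #   markdown("'Twas --- 'twas so...", extras=["smarty-pants"]) |
|     # |
|     # the apostrophes come back as &#8217;, the "---" as an &#8212; em |
|     # dash, and the "..." as an &#8230; ellipsis; "--" alone would become |
|     # an &#8211; en dash. |
| |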
| _block_quote_re = re.compile(r''' |
| ( # Wrap whole match in \1 |
| ( |
| ^[ \t]*>[ \t]? # '>' at the start of a line |
| .+\n # rest of the first line |
| (.+\n)* # subsequent consecutive lines |
| \n* # blanks |
| )+ |
| ) |
| ''', re.M | re.X) |
|     _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M) |
| |
| _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S) |
| def _dedent_two_spaces_sub(self, match): |
| return re.sub(r'(?m)^ ', '', match.group(1)) |
| |
| def _block_quote_sub(self, match): |
| bq = match.group(1) |
| bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting |
| bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines |
| bq = self._run_block_gamut(bq) # recurse |
| |
| bq = re.sub('(?m)^', ' ', bq) |
| # These leading spaces screw with <pre> content, so we need to fix that: |
| bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq) |
| |
| return "<blockquote>\n%s\n</blockquote>\n\n" % bq |
| |
| def _do_block_quotes(self, text): |
| if '>' not in text: |
| return text |
| return self._block_quote_re.sub(self._block_quote_sub, text) |
| |
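|     # E.g. (sketch): |
|     # |
|     #   > A quoted |
|     #   > paragraph. |
|     # |
|     # becomes, roughly: |
|     # |
|     #   <blockquote> |
|     #     <p>A quoted |
|     #     paragraph.</p> |
|     #   </blockquote> |
| |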
| def _form_paragraphs(self, text): |
| # Strip leading and trailing lines: |
| text = text.strip('\n') |
| |
| # Wrap <p> tags. |
| grafs = [] |
| for i, graf in enumerate(re.split(r"\n{2,}", text)): |
| if graf in self.html_blocks: |
| # Unhashify HTML blocks |
| grafs.append(self.html_blocks[graf]) |
| else: |
| cuddled_list = None |
| if "cuddled-lists" in self.extras: |
| # Need to put back trailing '\n' for `_list_item_re` |
| # match at the end of the paragraph. |
| li = self._list_item_re.search(graf + '\n') |
| # Two of the same list marker in this paragraph: a likely |
| # candidate for a list cuddled to preceding paragraph |
| # text (issue 33). Note the `[-1]` is a quick way to |
| # consider numeric bullets (e.g. "1." and "2.") to be |
| # equal. |
| if (li and len(li.group(2)) <= 3 and li.group("next_marker") |
| and li.group("marker")[-1] == li.group("next_marker")[-1]): |
| start = li.start() |
| cuddled_list = self._do_lists(graf[start:]).rstrip("\n") |
| assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>") |
| graf = graf[:start] |
| |
| # Wrap <p> tags. |
| graf = self._run_span_gamut(graf) |
| grafs.append("<p>" + graf.lstrip(" \t") + "</p>") |
| |
| if cuddled_list: |
| grafs.append(cuddled_list) |
| |
| return "\n\n".join(grafs) |
| |
| def _add_footnotes(self, text): |
| if self.footnotes: |
| footer = [ |
| '<div class="footnotes">', |
| '<hr' + self.empty_element_suffix, |
| '<ol>', |
| ] |
| for i, id in enumerate(self.footnote_ids): |
| if i != 0: |
| footer.append('') |
| footer.append('<li id="fn-%s">' % id) |
| footer.append(self._run_block_gamut(self.footnotes[id])) |
| backlink = ('<a href="#fnref-%s" ' |
| 'class="footnoteBackLink" ' |
| 'title="Jump back to footnote %d in the text.">' |
|                     '&#8617;</a>' % (id, i+1)) |
| if footer[-1].endswith("</p>"): |
| footer[-1] = footer[-1][:-len("</p>")] \ |
| + ' ' + backlink + "</p>" |
| else: |
| footer.append("\n<p>%s</p>" % backlink) |
| footer.append('</li>') |
| footer.append('</ol>') |
| footer.append('</div>') |
| return text + '\n\n' + '\n'.join(footer) |
| else: |
| return text |
| |
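|     # Rough illustration of the "footnotes" extra handled above: input like |
|     # |
|     #   "A claim.[^1]\n\n[^1]: The supporting note." |
|     # |
|     # produces a superscript reference link in the body plus a trailing |
|     # <div class="footnotes"> block like the one assembled here, with an |
|     # <ol> of the notes and &#8617; back-links to each reference. |
| |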
| # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin: |
| # http://bumppo.net/projects/amputator/ |
| _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)') |
| _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I) |
| _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I) |
| |
| def _encode_amps_and_angles(self, text): |
| # Smart processing for ampersands and angle brackets that need |
| # to be encoded. |
|         text = self._ampersand_re.sub('&amp;', text) |
| |
|         # Encode naked <'s |
|         text = self._naked_lt_re.sub('&lt;', text) |
| |
|         # Encode naked >'s |
|         # Note: Other markdown implementations (e.g. Markdown.pl, PHP |
|         # Markdown) don't do this. |
|         text = self._naked_gt_re.sub('&gt;', text) |
| return text |
| |
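|     # E.g. (sketch): |
|     # |
|     #   >>> Markdown()._encode_amps_and_angles("AT&T says 1 < 2, &copy; stays") |
|     #   'AT&amp;T says 1 &lt; 2, &copy; stays' |
| |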
| def _encode_backslash_escapes(self, text): |
| for ch, escape in list(self._escape_table.items()): |
| text = text.replace("\\"+ch, escape) |
| return text |
| |
| _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I) |
| def _auto_link_sub(self, match): |
| g1 = match.group(1) |
| return '<a href="%s">%s</a>' % (g1, g1) |
| |
| _auto_email_link_re = re.compile(r""" |
| < |
| (?:mailto:)? |
| ( |
| [-.\w]+ |
| \@ |
| [-\w]+(\.[-\w]+)*\.[a-z]+ |
| ) |
| > |
| """, re.I | re.X | re.U) |
| def _auto_email_link_sub(self, match): |
| return self._encode_email_address( |
| self._unescape_special_chars(match.group(1))) |
| |
| def _do_auto_links(self, text): |
| text = self._auto_link_re.sub(self._auto_link_sub, text) |
| text = self._auto_email_link_re.sub(self._auto_email_link_sub, text) |
| return text |
| |
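|     # E.g. (sketch): "<http://example.com>" turns into |
|     # |
|     #   <a href="http://example.com">http://example.com</a> |
|     # |
|     # and "<foo@example.com>" becomes a mailto: link with its characters |
|     # entity-encoded by _encode_email_address() below. |
| |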
| def _encode_email_address(self, addr): |
| # Input: an email address, e.g. "foo@example.com" |
| # |
|         # Output: the email address as a mailto link, with most characters |
|         #   of the address encoded as either a decimal or hex entity, in |
|         #   the hopes of foiling most address harvesting spam bots. E.g.: |
|         # |
|         #   <a href="&#x6d;&#x61;il&#116;o&#x3a;foo&#x40;&#101;x&#x61;&#x6d; |
|         #       &#x70;l&#101;&#x2e;co&#x6d;">foo&#x40;&#101;x&#x61;&#x6d; |
|         #       &#x70;l&#101;&#x2e;co&#x6d;</a> |
| # |
| # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk |
| # mailing list: <http://tinyurl.com/yu7ue> |
| chars = [_xml_encode_email_char(ch) |
| for ch in "mailto:" + addr] |
| # Strip the mailto: from the visible part. |
| addr = '<a href="%s">%s</a>' \ |
| % (''.join(chars), ''.join(chars[7:])) |
| return addr |
| |
| def _do_link_patterns(self, text): |
| """Caveat emptor: there isn't much guarding against link |
| patterns being formed inside other standard Markdown links, e.g. |
| inside a [link def][like this]. |
| |
| Dev Notes: *Could* consider prefixing regexes with a negative |
| lookbehind assertion to attempt to guard against this. |
| """ |
| link_from_hash = {} |
| for regex, repl in self.link_patterns: |
| replacements = [] |
| for match in regex.finditer(text): |
| if hasattr(repl, "__call__"): |
| href = repl(match) |
| else: |
| href = match.expand(repl) |
| replacements.append((match.span(), href)) |
| for (start, end), href in reversed(replacements): |
| escaped_href = ( |
|                     href.replace('"', '&quot;') # b/c of attr quote |
| # To avoid markdown <em> and <strong>: |
| .replace('*', self._escape_table['*']) |
| .replace('_', self._escape_table['_'])) |
| link = '<a href="%s">%s</a>' % (escaped_href, text[start:end]) |
| hash = _hash_text(link) |
| link_from_hash[hash] = link |
| text = text[:start] + hash + text[end:] |
| for hash, link in list(link_from_hash.items()): |
| text = text.replace(hash, link) |
| return text |
| |
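|     # Example "link-patterns" setup (a sketch; the pattern and URL are |
|     # made up for illustration): |
|     # |
|     #   link_patterns = [ |
|     #       (re.compile(r"\bissue #?(\d+)\b", re.I), |
|     #        r"https://example.com/issues/\1"), |
|     #   ] |
|     #   markdown("fixes issue 42", extras=["link-patterns"], |
|     #            link_patterns=link_patterns) |
|     #   # -> '<p>fixes <a href="https://example.com/issues/42">issue 42</a></p>\n' |
| |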
| def _unescape_special_chars(self, text): |
| # Swap back in all the special characters we've hidden. |
| for ch, hash in list(self._escape_table.items()): |
| text = text.replace(hash, ch) |
| return text |
| |
| def _outdent(self, text): |
| # Remove one level of line-leading tabs or spaces |
| return self._outdent_re.sub('', text) |
| |
| |
| class MarkdownWithExtras(Markdown): |
| """A markdowner class that enables most extras: |
| |
| - footnotes |
| - code-color (only has effect if 'pygments' Python module on path) |
| |
| These are not included: |
| - pyshell (specific to Python-related documenting) |
| - code-friendly (because it *disables* part of the syntax) |
| - link-patterns (because you need to specify some actual |
| link-patterns anyway) |
| """ |
| extras = ["footnotes", "code-color"] |
| |
| |
| #---- internal support functions |
| |
| class UnicodeWithAttrs(unicode): |
| """A subclass of unicode used for the return value of conversion to |
| possibly attach some attributes. E.g. the "toc_html" attribute when |
| the "toc" extra is used. |
| """ |
| metadata = None |
| _toc = None |
| def toc_html(self): |
| """Return the HTML for the current TOC. |
| |
| This expects the `_toc` attribute to have been set on this instance. |
| """ |
| if self._toc is None: |
| return None |
| |
| def indent(): |
| return ' ' * (len(h_stack) - 1) |
| lines = [] |
| h_stack = [0] # stack of header-level numbers |
| for level, id, name in self._toc: |
| if level > h_stack[-1]: |
| lines.append("%s<ul>" % indent()) |
| h_stack.append(level) |
| elif level == h_stack[-1]: |
| lines[-1] += "</li>" |
| else: |
| while level < h_stack[-1]: |
| h_stack.pop() |
| if not lines[-1].endswith("</li>"): |
| lines[-1] += "</li>" |
| lines.append("%s</ul></li>" % indent()) |
| lines.append('%s<li><a href="#%s">%s</a>' % ( |
| indent(), id, name)) |
| while len(h_stack) > 1: |
| h_stack.pop() |
| if not lines[-1].endswith("</li>"): |
| lines[-1] += "</li>" |
| lines.append("%s</ul>" % indent()) |
| return '\n'.join(lines) + '\n' |
| toc_html = property(toc_html) |
| |
| ## {{{ http://code.activestate.com/recipes/577257/ (r1) |
| _slugify_strip_re = re.compile(r'[^\w\s-]') |
| _slugify_hyphenate_re = re.compile(r'[-\s]+') |
| def _slugify(value): |
| """ |
| Normalizes string, converts to lowercase, removes non-alpha characters, |
| and converts spaces to hyphens. |
| |
| From Django's "django/template/defaultfilters.py". |
| """ |
| import unicodedata |
| value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode() |
| value = _slugify_strip_re.sub('', value).strip().lower() |
| return _slugify_hyphenate_re.sub('-', value) |
| ## end of http://code.activestate.com/recipes/577257/ }}} |
| |
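| # E.g. (sketch): _slugify(u"Hello, World!") gives 'hello-world'; accented |
| # characters are first NFKD-normalized and non-ASCII bytes are dropped. |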
| |
| # From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549 |
| def _curry(*args, **kwargs): |
| function, args = args[0], args[1:] |
| def result(*rest, **kwrest): |
| combined = kwargs.copy() |
| combined.update(kwrest) |
| return function(*args + rest, **combined) |
| return result |
| |
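| # E.g. (sketch): |
| # |
| #   >>> add = lambda a, b, c: a + b + c |
| #   >>> add5 = _curry(add, 5) |
| #   >>> add5(2, 3) |
| #   10 |
| |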
| # Recipe: regex_from_encoded_pattern (1.0) |
| def _regex_from_encoded_pattern(s): |
| """'foo' -> re.compile(re.escape('foo')) |
| '/foo/' -> re.compile('foo') |
| '/foo/i' -> re.compile('foo', re.I) |
| """ |
| if s.startswith('/') and s.rfind('/') != 0: |
| # Parse it: /PATTERN/FLAGS |
| idx = s.rfind('/') |
| pattern, flags_str = s[1:idx], s[idx+1:] |
| flag_from_char = { |
| "i": re.IGNORECASE, |
| "l": re.LOCALE, |
| "s": re.DOTALL, |
| "m": re.MULTILINE, |
| "u": re.UNICODE, |
| } |
| flags = 0 |
| for char in flags_str: |
| try: |
| flags |= flag_from_char[char] |
| except KeyError: |
| raise ValueError("unsupported regex flag: '%s' in '%s' " |
| "(must be one of '%s')" |
| % (char, s, ''.join(list(flag_from_char.keys())))) |
| return re.compile(s[1:idx], flags) |
| else: # not an encoded regex |
| return re.compile(re.escape(s)) |
| |
| # Recipe: dedent (0.1.2) |
| def _dedentlines(lines, tabsize=8, skip_first_line=False): |
| """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines |
| |
| "lines" is a list of lines to dedent. |
| "tabsize" is the tab width to use for indent width calculations. |
| "skip_first_line" is a boolean indicating if the first line should |
| be skipped for calculating the indent width and for dedenting. |
| This is sometimes useful for docstrings and similar. |
| |
| Same as dedent() except operates on a sequence of lines. Note: the |
| lines list is modified **in-place**. |
| """ |
| DEBUG = False |
| if DEBUG: |
| print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\ |
| % (tabsize, skip_first_line)) |
| indents = [] |
| margin = None |
| for i, line in enumerate(lines): |
| if i == 0 and skip_first_line: continue |
| indent = 0 |
| for ch in line: |
| if ch == ' ': |
| indent += 1 |
| elif ch == '\t': |
| indent += tabsize - (indent % tabsize) |
| elif ch in '\r\n': |
| continue # skip all-whitespace lines |
| else: |
| break |
| else: |
| continue # skip all-whitespace lines |
| if DEBUG: print("dedent: indent=%d: %r" % (indent, line)) |
| if margin is None: |
| margin = indent |
| else: |
| margin = min(margin, indent) |
| if DEBUG: print("dedent: margin=%r" % margin) |
| |
| if margin is not None and margin > 0: |
| for i, line in enumerate(lines): |
| if i == 0 and skip_first_line: continue |
| removed = 0 |
| for j, ch in enumerate(line): |
| if ch == ' ': |
| removed += 1 |
| elif ch == '\t': |
| removed += tabsize - (removed % tabsize) |
| elif ch in '\r\n': |
| if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line) |
| lines[i] = lines[i][j:] |
| break |
| else: |
| raise ValueError("unexpected non-whitespace char %r in " |
| "line %r while removing %d-space margin" |
| % (ch, line, margin)) |
| if DEBUG: |
| print("dedent: %r: %r -> removed %d/%d"\ |
| % (line, ch, removed, margin)) |
| if removed == margin: |
| lines[i] = lines[i][j+1:] |
| break |
| elif removed > margin: |
| lines[i] = ' '*(removed-margin) + lines[i][j+1:] |
| break |
| else: |
| if removed: |
| lines[i] = lines[i][removed:] |
| return lines |
| |
| def _dedent(text, tabsize=8, skip_first_line=False): |
| """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text |
| |
| "text" is the text to dedent. |
| "tabsize" is the tab width to use for indent width calculations. |
| "skip_first_line" is a boolean indicating if the first line should |
| be skipped for calculating the indent width and for dedenting. |
| This is sometimes useful for docstrings and similar. |
| |
| textwrap.dedent(s), but don't expand tabs to spaces |
| """ |
|     lines = text.splitlines(True) |
| _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line) |
| return ''.join(lines) |
| |
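| # E.g. (sketch): |
| # |
| #   >>> _dedent("    foo\n      bar\n") |
| #   'foo\n  bar\n' |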
| |
| class _memoized(object): |
| """Decorator that caches a function's return value each time it is called. |
| If called later with the same arguments, the cached value is returned, and |
| not re-evaluated. |
| |
| http://wiki.python.org/moin/PythonDecoratorLibrary |
| """ |
| def __init__(self, func): |
| self.func = func |
| self.cache = {} |
| def __call__(self, *args): |
| try: |
| return self.cache[args] |
| except KeyError: |
| self.cache[args] = value = self.func(*args) |
| return value |
| except TypeError: |
| # uncachable -- for instance, passing a list as an argument. |
| # Better to not cache than to blow up entirely. |
| return self.func(*args) |
| def __repr__(self): |
| """Return the function's docstring.""" |
| return self.func.__doc__ |
| |
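| # E.g. (sketch, with a made-up _fib helper): |
| # |
| #   @_memoized |
| #   def _fib(n): |
| #       return n if n < 2 else _fib(n - 1) + _fib(n - 2) |
| # |
| # Repeated calls with the same (hashable) arguments are then served from |
| # the cache; unhashable arguments fall back to a normal call. |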
| |
| def _xml_oneliner_re_from_tab_width(tab_width): |
| """Standalone XML processing instruction regex.""" |
| return re.compile(r""" |
| (?: |
| (?<=\n\n) # Starting after a blank line |
| | # or |
| \A\n? # the beginning of the doc |
| ) |
| ( # save in $1 |
| [ ]{0,%d} |
| (?: |
| <\?\w+\b\s+.*?\?> # XML processing instruction |
| | |
| <\w+:\w+\b\s+.*?/> # namespaced single tag |
| ) |
| [ \t]* |
| (?=\n{2,}|\Z) # followed by a blank line or end of document |
| ) |
| """ % (tab_width - 1), re.X) |
| _xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width) |
| |
| def _hr_tag_re_from_tab_width(tab_width): |
| return re.compile(r""" |
| (?: |
| (?<=\n\n) # Starting after a blank line |
| | # or |
| \A\n? # the beginning of the doc |
| ) |
| ( # save in \1 |
| [ ]{0,%d} |
| <(hr) # start tag = \2 |
| \b # word break |
| ([^<>])*? # |
| /?> # the matching end tag |
| [ \t]* |
| (?=\n{2,}|\Z) # followed by a blank line or end of document |
| ) |
| """ % (tab_width - 1), re.X) |
| _hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width) |
| |
| |
| def _xml_escape_attr(attr, skip_single_quote=True): |
| """Escape the given string for use in an HTML/XML tag attribute. |
| |
|     By default this doesn't bother with escaping `'` to `&#39;`, presuming that |
| the tag attribute is surrounded by double quotes. |
| """ |
| escaped = (attr |
|         .replace('&', '&amp;') |
|         .replace('"', '&quot;') |
|         .replace('<', '&lt;') |
|         .replace('>', '&gt;')) |
|     if not skip_single_quote: |
|         escaped = escaped.replace("'", "&#39;") |
| return escaped |
| |
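| # E.g. (sketch): |
| # |
| #   >>> _xml_escape_attr('a "quoted" <b> & more') |
| #   'a &quot;quoted&quot; &lt;b&gt; &amp; more' |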
| |
| def _xml_encode_email_char(ch): |
|     # jross: We need this to be deterministic, so use a fixed modulo-3 |
|     # scheme (raw char, hex entity, or decimal entity) instead of the |
|     # random choice made by _xml_encode_email_char_at_random below. |
| |
| modulo = ord(ch) % 3 |
| |
| if (modulo == 0): |
| return ch |
| elif (modulo == 1): |
| return '&#%s;' % hex(ord(ch))[1:] |
| else: |
| return '&#%s;' % ord(ch) |
| |
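| # E.g. (sketch): ord('a') % 3 == 1, ord('b') % 3 == 2, ord('c') % 3 == 0, so |
| # |
| #   >>> [_xml_encode_email_char(c) for c in "abc"] |
| #   ['&#x61;', '&#98;', 'c'] |
| |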
| def _xml_encode_email_char_at_random(ch): |
| r = random() |
| # Roughly 10% raw, 45% hex, 45% dec. |
| # '@' *must* be encoded. I [John Gruber] insist. |
| # Issue 26: '_' must be encoded. |
| if r > 0.9 and ch not in "@_": |
| return ch |
| elif r < 0.45: |
| # The [1:] is to drop leading '0': 0x63 -> x63 |
| return '&#%s;' % hex(ord(ch))[1:] |
| else: |
| return '&#%s;' % ord(ch) |
| |
| |
| |
| #---- mainline |
| |
| class _NoReflowFormatter(optparse.IndentedHelpFormatter): |
| """An optparse formatter that does NOT reflow the description.""" |
| def format_description(self, description): |
| return description or "" |
| |
| def _test(): |
| import doctest |
| doctest.testmod() |
| |
| def main(argv=None): |
| if argv is None: |
| argv = sys.argv |
| if not logging.root.handlers: |
| logging.basicConfig() |
| |
| usage = "usage: %prog [PATHS...]" |
| version = "%prog "+__version__ |
| parser = optparse.OptionParser(prog="markdown2", usage=usage, |
| version=version, description=cmdln_desc, |
| formatter=_NoReflowFormatter()) |
| parser.add_option("-v", "--verbose", dest="log_level", |
| action="store_const", const=logging.DEBUG, |
| help="more verbose output") |
| parser.add_option("--encoding", |
| help="specify encoding of text content") |
| parser.add_option("--html4tags", action="store_true", default=False, |
| help="use HTML 4 style for empty element tags") |
| parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode", |
| help="sanitize literal HTML: 'escape' escapes " |
| "HTML meta chars, 'replace' replaces with an " |
| "[HTML_REMOVED] note") |
| parser.add_option("-x", "--extras", action="append", |
| help="Turn on specific extra features (not part of " |
| "the core Markdown spec). See above.") |
| parser.add_option("--use-file-vars", |
| help="Look for and use Emacs-style 'markdown-extras' " |
| "file var to turn on extras. See " |
| "<https://github.com/trentm/python-markdown2/wiki/Extras>") |
| parser.add_option("--link-patterns-file", |
| help="path to a link pattern file") |
| parser.add_option("--self-test", action="store_true", |
| help="run internal self-tests (some doctests)") |
| parser.add_option("--compare", action="store_true", |
| help="run against Markdown.pl as well (for testing)") |
| parser.set_defaults(log_level=logging.INFO, compare=False, |
| encoding="utf-8", safe_mode=None, use_file_vars=False) |
| opts, paths = parser.parse_args() |
| log.setLevel(opts.log_level) |
| |
| if opts.self_test: |
| return _test() |
| |
| if opts.extras: |
| extras = {} |
| for s in opts.extras: |
| splitter = re.compile("[,;: ]+") |
| for e in splitter.split(s): |
| if '=' in e: |
| ename, earg = e.split('=', 1) |
| try: |
| earg = int(earg) |
| except ValueError: |
| pass |
| else: |
| ename, earg = e, None |
| extras[ename] = earg |
| else: |
| extras = None |
| |
| if opts.link_patterns_file: |
| link_patterns = [] |
| f = open(opts.link_patterns_file) |
| try: |
| for i, line in enumerate(f.readlines()): |
| if not line.strip(): continue |
| if line.lstrip().startswith("#"): continue |
| try: |
| pat, href = line.rstrip().rsplit(None, 1) |
| except ValueError: |
| raise MarkdownError("%s:%d: invalid link pattern line: %r" |
| % (opts.link_patterns_file, i+1, line)) |
| link_patterns.append( |
| (_regex_from_encoded_pattern(pat), href)) |
| finally: |
| f.close() |
| else: |
| link_patterns = None |
| |
| from os.path import join, dirname, abspath, exists |
| markdown_pl = join(dirname(dirname(abspath(__file__))), "test", |
| "Markdown.pl") |
| if not paths: |
| paths = ['-'] |
| for path in paths: |
| if path == '-': |
| text = sys.stdin.read() |
| else: |
| fp = codecs.open(path, 'r', opts.encoding) |
| text = fp.read() |
| fp.close() |
| if opts.compare: |
| from subprocess import Popen, PIPE |
| print("==== Markdown.pl ====") |
| p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True) |
| p.stdin.write(text.encode('utf-8')) |
| p.stdin.close() |
| perl_html = p.stdout.read().decode('utf-8') |
| if py3: |
| sys.stdout.write(perl_html) |
| else: |
| sys.stdout.write(perl_html.encode( |
| sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) |
| print("==== markdown2.py ====") |
| html = markdown(text, |
| html4tags=opts.html4tags, |
| safe_mode=opts.safe_mode, |
| extras=extras, link_patterns=link_patterns, |
| use_file_vars=opts.use_file_vars) |
| if py3: |
| sys.stdout.write(html) |
| else: |
| sys.stdout.write(html.encode( |
| sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) |
| if extras and "toc" in extras: |
| log.debug("toc_html: " + |
| html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) |
| if opts.compare: |
| test_dir = join(dirname(dirname(abspath(__file__))), "test") |
| if exists(join(test_dir, "test_markdown2.py")): |
| sys.path.insert(0, test_dir) |
| from test_markdown2 import norm_html_from_html |
| norm_html = norm_html_from_html(html) |
| norm_perl_html = norm_html_from_html(perl_html) |
| else: |
| norm_html = html |
| norm_perl_html = perl_html |
| print("==== match? %r ====" % (norm_perl_html == norm_html)) |
| |
| |
| if __name__ == "__main__": |
| sys.exit( main(sys.argv) ) |