Plugins have been moved to infrastructure-pelican, and pelicanconf.yaml/buildsite.py is now primary
diff --git a/pelicanconf.py b/pelicanconf.py
deleted file mode 100644
index 2a20e34..0000000
--- a/pelicanconf.py
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*- #
-# vim: encoding=utf-8
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-from __future__ import unicode_literals
-from datetime import date
-
-# import os
-# import sys
-
-PATH = 'content'
-
-TIMEZONE = 'UTC'
-
-DEFAULT_LANG = u'en'
-
-# Website specific settings
-AUTHOR = u'Template Community'
-SITENAME = u'Apache Template'
-SITEDOMAIN = 'template.apache.org'
-SITEURL = 'https://template.apache.org'
-SITELOGO = 'https://template.apache.org/images/logo.png'
-SITEDESC = u'Provides a template for projects wishing to use the Pelican ASF static content system'
-TRADEMARKS = u'Apache, the Apache feather logo, and "Project" are trademarks or registered trademarks'
-SITEREPOSITORY = 'https://github.com/apache/template-site/blob/main/content/'
-CURRENTYEAR = date.today().year
-
-# ASF Data specification
-ASF_DATA_YAML = "asfdata.yaml"
-
-# Save pages using full directory preservation
-PAGE_PATHS = ['.']
-
-# Path with no extension: \..* matches the extension
-PATH_METADATA = '(?P<path_no_ext>.*)\\..*'
-
-# We are not slugifying any pages
-ARTICLE_URL = ARTICLE_SAVE_AS = PAGE_URL = PAGE_SAVE_AS = '{path_no_ext}.html'
-
-# If we wanted to have articles.
-# SLUGIFY_SOURCE = 'basename'
-# ARTICLE_SAVE_AS = '{slug}.html'
-
-# Disable these pages
-ARCHIVES_SAVE_AS = ''
-AUTHORS_SAVE_AS = ''
-CATEGORIES_SAVE_AS = ''
-INDEX_SAVE_AS = ''
-TAGS_SAVE_AS = ''
-
-# We want to serve our static files mixed with content.
-STATIC_PATHS = ['.']
-
-# we want any html to be served as is
-READERS = {'html': None}
-
-# We don't use articles, but we don't want pelican to think
-# that content/ contains articles.
-ARTICLE_PATHS = ['articles']
-
-# ignore README.md files in the content tree
-IGNORE_FILES = ['README.md', 'include', 'docs']
-
-# No translations
-PAGE_TRANSLATION_ID = None
-
-# Enable ATOM feed and Disable other feeds
-FEED_DOMAIN = SITEURL
-FEED_ALL_ATOM = None
-CATEGORY_FEED_ATOM = None
-TRANSLATION_FEED_ATOM = None
-AUTHOR_FEED_ATOM = None
-AUTHOR_FEED_RSS = None
-
-# Theme
-THEME = './theme/apache'
-
-# Uncomment this to put the build date on every page.
-# DEFAULT_DATE = 'fs'
-
-# Pelican Plugins
-# The provided location. If the buildbot does not have a new plugin, look into requirements.txt
-PLUGIN_PATHS = ['./theme/plugins']
-# With pelican-sitemap
-# PLUGINS = ['asfgenid', 'asfdata', 'pelican-gfm', 'asfreader', 'sitemap']
-# With data and ezt templates
-PLUGINS = ['asfgenid', 'asfshell', 'asfdata', 'pelican-gfm', 'asfreader', 'asfcopy']
-# With asfgenid
-# PLUGINS = ['asfgenid', 'pelican-gfm']
-
-TYPOGRAPHY = True
-TYPOGRAPHY_IGNORE_TAGS = ['pre', 'code', 'style', 'script']
-
-# Lifecycle and plugins:
-# (1) Initialization:
-#     asfdata - populate a sitewide dictionary of ASF_DATA
-# (2) Readers process content into metadata and html
-#     pelican-gfm (GFMReader) - reads GFM Markdown with metadata and generates html
-#     asfreader (ASFReader) - reads GFM Markdown with embedded ezt templates, uses metadata enhanced
-#          by the sitewide dictionary to generate markdown via ezt, and then generates html
-# (3) HTML Content enhancement
-#     asfgenid - performs a series of enhancements to the HTML - see ASF_GENID
-# (4) Site generation
-#     sitemap - produces a sitemap.xml
-
-# Configure the asfdata plugin if you need data and ezt templates
-ASF_DATA = {
-    'data': ASF_DATA_YAML,
-    'metadata': {
-        'site_url': SITEURL
-    },
-    'debug': False
-}
-
-# Configure the asfgenid plugin
-ASF_GENID = {
-    'unsafe_tags': True,
-    'metadata': True,
-    'elements': True,
-    'headings': True,
-    'headings_re': r'^h[1-4]',
-    'permalinks': True,
-    'toc': True,
-    'toc_headers': r"h[1-4]",
-    'tables': True,
-    'debug': False
-}
-
-# Configure the asfcopy plugin to copy files outside of all other pelican processes.
-# Include these directories in IGNORE_FILES as well.
-ASF_COPY = [
-    'docs'
-]
-
-# Configure the shell commands for the asfshell plugin to run during initialization
-ASF_SHELL = [
-    '/bin/bash shell.sh'
-]
-
-# Sitemap Generator
-# SITEMAP = {
-#    "exclude": ["tag/", "category/"],
-#    "format": "xml",
-#    "priorities": {
-#        "articles": 0.1,
-#        "indexes": 0.1,
-#        "pages": 0.8
-#    },
-#    "changefreqs": {
-#        "articles": "never",
-#        "indexes": "never",
-#        "pages": "monthly"
-#    }
-# }
-
-# Markdown Configuration
-# When using GFMReader or ASFReader, the MARKDOWN configuration has no effect on GFM
-# MARKDOWN = {
-# }
-
-# TOC Generator
-# When ASF_GENID handles TOC generation, this setting is unused.
-# TOC_HEADERS = r"h[1-6]"
-
-# Unused links
-LINKS = ( )
-SOCIAL = ( )
-
-DEFAULT_PAGINATION = False
-
-# Uncomment following line if you want document-relative URLs when developing
-# RELATIVE_URLS = True
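For orientation on the URL scheme the deleted config used: `PATH_METADATA` captures each source path minus its extension, and the `*_URL`/`*_SAVE_AS` settings reuse that capture. A minimal sketch of the mapping (the sample path is hypothetical):

```python
import re

# The pattern from the deleted config: the greedy group ends at the last dot.
PATH_METADATA = r'(?P<path_no_ext>.*)\..*'

source = 'community/contributors.md'  # hypothetical content file
match = re.match(PATH_METADATA, source)
print(match.group('path_no_ext'))     # community/contributors
# With PAGE_SAVE_AS = '{path_no_ext}.html', Pelican writes community/contributors.html
```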
diff --git a/theme/plugins/asfcopy.py b/theme/plugins/asfcopy.py
deleted file mode 100644
index e61685c..0000000
--- a/theme/plugins/asfcopy.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/python -B
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# asfcopy.py -- Pelican plugin that copies trees during finalization
-#
-
-import sys
-import shutil
-import os
-import traceback
-
-import pelican.plugins.signals
-import pelican.settings
-
-
-# copy trees from PATH to OUTPUT_PATH
-def copy_trees(pel_ob):
-    print('-----\nasfcopy')
-
-    output_path = pel_ob.settings.get('OUTPUT_PATH')
-    path = pel_ob.settings.get('PATH')
-    asf_copy = pel_ob.settings.get('ASF_COPY')
-    if asf_copy:
-        for tree in asf_copy:
-            src = os.path.join(path, tree)
-            dst = os.path.join(output_path, tree)
-            print(f'{src} --> {dst}')
-            shutil.copytree(src, dst)
-    else:
-        print("Nothing to copy")
-
-
-def tb_finalized(pel_ob):
-    """ Print any exception, before Pelican chews it into nothingness."""
-    try:
-        copy_trees(pel_ob)
-    except Exception:
-        print('-----', file=sys.stderr)
-        traceback.print_exc()
-        # exceptions here stop the build
-        raise
-
-
-def register():
-    pelican.plugins.signals.finalized.connect(tb_finalized)
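The copy step asfcopy performed at the `finalized` signal boils down to one `shutil.copytree` per configured tree. Below is a standalone sketch with temporary directories standing in for the real `content/` and `output/` trees; note that `copytree` requires the destination to not yet exist, so a clean output directory is assumed on each build:

```python
import os
import shutil
import tempfile

# Stand-ins for the PATH ('content') and OUTPUT_PATH ('output') settings.
content = tempfile.mkdtemp()
output = tempfile.mkdtemp()

# A 'docs' tree, as listed in ASF_COPY and excluded from Pelican via IGNORE_FILES.
os.makedirs(os.path.join(content, 'docs'))
open(os.path.join(content, 'docs', 'index.html'), 'w').close()

for tree in ['docs']:
    src = os.path.join(content, tree)
    dst = os.path.join(output, tree)
    shutil.copytree(src, dst)  # raises FileExistsError if dst already exists

print(os.listdir(os.path.join(output, 'docs')))  # ['index.html']
```

On Python 3.8+, `shutil.copytree(..., dirs_exist_ok=True)` would relax the fresh-destination constraint.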
diff --git a/theme/plugins/asfdata.py b/theme/plugins/asfdata.py
deleted file mode 100644
index 3d7fc30..0000000
--- a/theme/plugins/asfdata.py
+++ /dev/null
@@ -1,790 +0,0 @@
-#!/usr/bin/python -B
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# asfdata.py -- Pelican plugin that processes a yaml specification of data into a settings dictionary
-#
-
-import os.path
-import sys
-import subprocess
-import datetime
-import random
-import json
-import re
-import traceback
-import operator
-import pprint
-
-import requests
-import yaml
-import ezt
-
-import xml.dom.minidom
-
-import pelican.plugins.signals
-import pelican.utils
-
-from bs4 import BeautifulSoup
-
-FIXUP_HTML = [
-    (re.compile(r'&lt;'), '<'),
-    (re.compile(r'&gt;'), '>'),
-]
-
-
-# read the asfdata configuration in order to get data load and transformation instructions.
-def read_config(config_yaml, debug):
-    with pelican.utils.pelican_open(config_yaml) as text:
-        config_data = yaml.safe_load(text)
-        if debug:
-            pp = pprint.PrettyPrinter(indent=2)
-            pp.pprint(config_data)
-    return config_data
-
-
-# load yaml and json data sources.
-def load_data(path, content, debug):
-    parts = path.split('/')
-    extension = os.path.splitext(parts[-1])[1]  # split off ext, keep ext
-    if debug:
-        print(f'Loading {extension} from {path}')
-    if extension == '.json':
-        load = json.loads(content)
-    elif extension == '.yaml':
-        load = yaml.safe_load(content)
-    else:
-        load = { }
-    return load
-
-
-# load data source from a url.
-def url_data(url, debug):
-    return load_data(url, requests.get(url).text, debug)
-
-
-# load data source from a file.
-def file_data(rel_path, debug):
-    return load_data(rel_path, open(rel_path, 'r').read(), debug)
-
-
-# remove parts of a data source we don't want to access
-def remove_part(reference, part):
-    for refs in reference:
-        if refs == part:
-            del reference[part]
-            return
-        elif isinstance(reference[refs], dict):
-            remove_part(reference[refs], part)
-
-
-# trim out parts of a data source that don't match part = True
-def where_parts(reference, part):
-    # currently only works on True parts
-    # if we trim as we go we invalidate the iterator. Instead create a deletion list.
-    filtered = [ ]
-    # first find the list that needs to be trimmed.
-    for refs in reference:
-        if not reference[refs][part]:
-            filtered.append(refs)
-    # remove the parts to be trimmed.
-    for refs in filtered:
-        del reference[refs]
-
-
-# perform alphabetization. HTTP Server is special and is put before 'A'
-def alpha_part(reference, part):
-    for refs in reference:
-        name = reference[refs][part]
-        if name == 'HTTP Server':
-            # when sorting by letter HTTP Server is wanted first
-            letter = ' '
-        else:
-            letter = name[0].upper()
-        reference[refs]['letter'] = letter
-
-
-# rotate a roster list singleton into a name and availid
-def asfid_part(reference, part):
-    for refs in reference:
-        fix = reference[refs][part]
-        for k in fix:
-            availid = k
-            name = fix[k]['name']
-        reference[refs][part] = name
-        reference[refs]['availid'] = availid
-
-
-# add logo attribute with HEAD check for existence. If nonexistent use default.
-def add_logo(reference, part):
-    # split between logo pattern and default.
-    parts = part.split(',')
-    for item in reference:
-        # the logo pattern includes a place to insert the project/podling key
-        logo = (parts[0].format(item.key_id))
-        # HEAD request
-        response = requests.head('https://www.apache.org/' + logo)
-        if response.status_code != 200:
-            # logo not found - use the default logo
-            logo = parts[1]
-        # save the logo path as an attribute
-        setattr(item, 'logo', logo)
-    return reference
-
-
-# convert a dictionary into a sequence (list)
-def sequence_dict(seq, reference):
-    sequence = [ ]
-    for refs in reference:
-        # converting dicts into objects with attributes. Ignore non-dict content.
-        if isinstance(reference[refs], dict):
-            # put the key of the dict into the dictionary
-            reference[refs]['key_id'] = refs
-            for item in reference[refs]:
-                if isinstance(reference[refs][item], bool):
-                    # fixup any boolean values to be ezt.boolean - essentially True -> "yes"
-                    reference[refs][item] = ezt.boolean(reference[refs][item])
-            # convert the dict into an object with attributes and append to the sequence
-            sequence.append(type(seq, (), reference[refs]))
-    return sequence
-
-
-# convert a list into a sequence. convert dictionary items into objects.
-def sequence_list(seq, reference):
-    sequence = [ ]
-    for refs in reference:
-        # only convert dicts into objects
-        if isinstance(refs, dict):
-            for item in refs:
-                if isinstance(refs[item], bool):
-                    # fixup any boolean values to be ezt.boolean - essentially True -> "yes"
-                    refs[item] = ezt.boolean(refs[item])
-                elif isinstance(refs[item], list):
-                    # recursively convert sub-lists
-                    refs[item] = sequence_list(item, refs[item])
-            # convert the dict into an object with attributes and append to the sequence
-            sequence.append(type(f'{seq}', (), refs))
-    return sequence
-
-
-# split a list into equal sized columns. Adds letter breaks in the alphabetical sequence.
-def split_list(metadata, seq, reference, split):
-    # copy sequence
-    sequence = list(reference)
-    # sort the copy
-    sequence.sort(key=lambda x: (x.letter, x.display_name))
-    # size of list
-    size = len(sequence)
-    # size of columns
-    percol = int((size + 26 + split - 1) / split)
-    # positions
-    start = nseq = nrow = 0
-    letter = ' '
-    # create each column
-    for column in range(split):
-        subsequence = [ ]
-        end = min(size + 26, start + percol)
-        while nrow < end:
-            if letter < sequence[nseq].letter:
-                # new letter - add a letter break into the column. If a letter has no content it is skipped
-                letter = sequence[nseq].letter
-                subsequence.append(type(seq, (), { 'letter': letter, 'display_name': letter}))
-            else:
-                # add the project into the sequence
-                subsequence.append(sequence[nseq])
-                nseq = nseq + 1
-            nrow = nrow + 1
-        # save the column sequence in the metadata
-        metadata[f'{seq}_{column}'] = subsequence
-        start = end
-    if nseq < size:
-        print(f'WARNING: {seq} not all of sequence consumed: short {size-nseq} projects')
-
-
-# process sequencing transformations to the data source
-def process_sequence(metadata, seq, sequence, load, debug):
-    reference = load
-    # has been converted to a sequence
-    is_sequence = False
-    # has been converted to a dictionary - won't be made into a sequence
-    is_dictionary = False
-    # save metadata at the end
-    save_metadata = True
-
-    # description
-    if debug and 'description' in sequence:
-        print(f'{seq}: {sequence["description"]}')
-
-    # select sub dictionary
-    if 'path' in sequence:
-        if debug:
-            print(f'path: {sequence["path"]}')
-        parts = sequence['path'].split('.')
-        for part in parts:
-            reference = reference[part]
-
-    # filter dictionary by attribute value. if filter is false discard
-    if 'where' in sequence:
-        if debug:
-            print(f'where: {sequence["where"]}')
-        where_parts(reference, sequence['where'])
-
-    # remove irrelevant keys
-    if 'trim' in sequence:
-        if debug:
-            print(f'trim: {sequence["trim"]}')
-        parts = sequence['trim'].split(',')
-        for part in parts:
-            remove_part(reference, part)
-
-    # transform roster and chair patterns
-    if 'asfid' in sequence:
-        if debug:
-            print(f'asfid: {sequence["asfid"]}')
-        asfid_part(reference, sequence['asfid'])
-
-    # add first letter for alphabetic categories
-    if 'alpha' in sequence:
-        if debug:
-            print(f'alpha: {sequence["alpha"]}')
-        alpha_part(reference, sequence['alpha'])
-
-    # this dictionary is derived from sub-dictionaries
-    if 'dictionary' in sequence:
-        if debug:
-            print(f'dictionary: {sequence["dictionary"]}')
-        reference = { }
-        paths = sequence['dictionary'].split(',')
-        # create a dictionary from the keys in one or more sub-dictionaries
-        for path in paths:
-            for key in load[path]:
-                reference[key] = load[path][key]
-        # dictionary result, do not sequence
-        is_dictionary = True
-
-    # this sequence is derived from another sequence
-    if 'sequence' in sequence:
-        if debug:
-            print(f'sequence: {sequence["sequence"]}')
-        reference = metadata[sequence['sequence']]
-        # sequences derived from prior sequences do not need to be converted to a sequence
-        is_sequence = True
-
-    # this sequence is a random sample of another sequence
-    if 'random' in sequence:
-        if debug:
-            print(f'random: {sequence["random"]}')
-        if is_sequence:
-            reference = random.sample(reference, sequence['random'])
-        else:
-            print(f'{seq} - random requires an existing sequence to sample')
-
-    # for a project or podling see if the logo exists w/HEAD and set the relative path.
-    if 'logo' in sequence:
-        if debug:
-            print(f'logo: {sequence["logo"]}')
-        if is_sequence:
-            # determine the project or podling logo
-            reference = add_logo(reference, sequence['logo'])
-            if seq == 'featured_pods':
-                # for podlings strip "Apache" from the beginning and "(incubating)" from the end.
-                # this is Sally's request
-                for item in reference:
-                    setattr(item, 'name', ' '.join(item.name.split(' ')[1:-1]))
-        else:
-            print(f'{seq} - logo requires an existing sequence')
-
-    # this sequence is a sorted list divided into multiple columns
-    if 'split' in sequence:
-        if debug:
-            print(f'split: {sequence["split"]}')
-        if is_sequence:
-            # create a sequence for each column
-            split_list(metadata, seq, reference, sequence['split'])
-            # created column sequences are already saved to metadata so do not do so later
-            save_metadata = False
-        else:
-            print(f'{seq} - split requires an existing sequence to split')
-
-    # if this is not already a sequence or dictionary then convert to a sequence
-    if not is_sequence and not is_dictionary:
-        # convert the dictionary/list to a sequence of objects
-        if debug:
-            print(f'{seq}: create sequence')
-        if isinstance(reference, dict):
-            reference = sequence_dict(seq, reference)
-        elif isinstance(reference, list):
-            reference = sequence_list(seq, reference)
-        else:
-            print(f'{seq}: cannot proceed, invalid type: must be dict or list')
-
-    # save sequence in metadata
-    if save_metadata:
-        metadata[seq] = reference
-
-
-# create metadata sequences and dictionaries from a data load
-def process_load(metadata, value, load, debug):
-    for seq in value:
-        if seq not in ('url', 'file'):
-            # one or more sequences
-            sequence = value[seq]
-            process_sequence(metadata, seq, sequence, load, debug)
-
-
-# convert byte count to human-readable (1k 2m 3g etc)
-def bytesto(bytecount, to, bsize=1024):
-    a = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6}
-    r = float(bytecount)
-    return r / (bsize ** a[to])
-
-
-# open a subprocess
-def os_popen(args):
-    return subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True)
-
-
-# retrieve the release distributions for a project from svn
-def process_distributions(project, src, sort_revision, debug):
-    if debug:
-        print(f'releases: {project}')
-
-    # current date information will help process svn ls results
-    gatherDate = datetime.datetime.utcnow()
-    gatherYear = gatherDate.year
-
-    # information to accumulate
-    signatures = {}
-    checksums = {}
-    fsizes = {}
-    dtms = {}
-    versions = {}
-    revisions = {}
-
-    # read the output from svn ls -Rv
-    url = f'https://dist.apache.org/repos/dist/release/{project}'
-    if debug:
-        print(f'releases: {url}')
-    with os_popen(['svn', 'ls', '-Rv', url]) as s:
-        for line in s.stdout:
-            line = line.strip()
-            listing = line.split(' ')
-            if line[-1:] == '/':
-                # skip directories
-                continue
-            if sort_revision:
-                revision = int(listing[0])
-            else:
-                revision = 0
-            # user = listing[1]
-            if listing[-6] == '':
-                # dtm in the past year
-                dtm1 = datetime.datetime.strptime(" ".join(listing[-4:-2]) + " " + str(gatherYear), "%b %d %Y")
-                if dtm1 > gatherDate:
-                    dtm1 = datetime.datetime.strptime(" ".join(listing[-4:-2]) + " " + str(gatherYear - 1), "%b %d %Y")
-                fsize = listing[-5]
-            else:
-                # dtm older than one year
-                dtm1 = datetime.datetime.strptime(" ".join(listing[-5:-1]), "%b %d %Y")
-                fsize = listing[-6]
-            # date is close enough
-            dtm = dtm1.strftime("%m/%d/%Y")
-            # convert to number of MB
-            if float(fsize) > 524288:
-                fsize = ('%.2f' % bytesto(fsize, 'm')) + ' MB'
-            else:
-                fsize = ('%.2f' % bytesto(fsize, 'k')) + ' KB'
-            # line is path
-            line = listing[-1]
-            # fields are parts of the path
-            fields = line.split('/')
-            # filename is the final part
-            filename = fields[-1]
-            # parts includes the whole path
-            parts = line.split('.')
-            # use the path as a key for each release
-            release = line
-            if filename:
-                if re.search(r'KEYS(\.txt)?$', filename):
-                    # save the KEYS file url
-                    keys = f'https://downloads.apache.org/{project}/{line}'
-                elif re.search(r'\.(asc|sig)$', filename, flags=re.IGNORECASE):
-                    # we key a release off of a signature. remove the extension
-                    release = '.'.join(parts[:-1])
-                    signatures[release] = filename
-                    # the path to the signature is used as the version
-                    versions[release] = '/'.join(fields[:-1])
-                    # we use the revision for sorting
-                    revisions[release] = revision
-                    if re.search(src, filename):
-                        # put source distributions in the front (it is a reverse sort)
-                        revisions[release] = revision + 100000
-                elif re.search(r'\.(sha512|sha1|sha256|sha|md5|mds)$', filename, flags=re.IGNORECASE):
-                    # some projects checksum their signatures
-                    part0 = ".".join(line.split('.')[-2:-1])
-                    if part0 == "asc":
-                        # skip files that are hashes of signatures
-                        continue
-                    # strip the extension to get the release name
-                    release = '.'.join(parts[:-1])
-                    checksums[release] = filename
-                else:
-                    # for the released file save the size and dtm
-                    fsizes[release] = fsize
-                    dtms[release] = dtm
-
-    # separate versions.
-    each_version = {}
-    for rel in signatures:
-        version = versions[rel]
-        if version not in each_version:
-            each_version[version] = []
-        release = rel[len(version) + 1:]
-        try:
-            each_version[version].append( Distribution(release=release,
-                                                       revision=revisions[rel],
-                                                       signature=signatures[rel],
-                                                       checksum=checksums[rel],
-                                                       dtm=dtms[rel],
-                                                       fsize=fsizes[rel]))
-        except Exception:
-            traceback.print_exc()
-
-    distributions = []
-    for version in each_version:
-        each_version[version].sort(key=lambda x: (-x.revision, x.release))
-        distributions.append( Version(version=version,
-                                      name=' '.join(version.split('/')),
-                                      revision=each_version[version][0].revision,
-                                      release=each_version[version]))
-    distributions.sort(key=lambda x: (-x.revision, x.version))
-    return keys, distributions
-
-
-# get xml text node
-def get_node_text(nodelist):
-    """http://www.python.org/doc/2.5.2/lib/minidom-example.txt"""
-    rc = ''
-    for node in nodelist:
-        if node.nodeType == node.TEXT_NODE:
-            rc = rc + node.data
-    return rc
-
-
-# get xml element's text nodes.
-def get_element_text(entry, child):
-    elements = entry.getElementsByTagName(child)
-    return get_node_text(elements[0].childNodes)
-
-
-# truncate html text to a given number of words.
-def truncate_words(text, words):
-    content_text = ' '.join(text.split(' ')[:words]) + "..."
-    for regex, replace in FIXUP_HTML:
-        m = regex.search(content_text)
-        if m:
-            content_text = re.sub(regex, replace, content_text)
-    tree_soup = BeautifulSoup(content_text, 'html.parser')
-    content_text = tree_soup.prettify()
-    return content_text
-
-
-# retrieve blog posts from an Atom feed.
-def process_blog(feed, count, words, debug):
-    if debug:
-        print(f'blog feed: {feed}')
-    content = requests.get(feed).text
-    dom = xml.dom.minidom.parseString(content)
-    # dive into the dom to get 'entry' elements
-    entries = dom.getElementsByTagName('entry')
-    # we only want count many from the beginning
-    entries = entries[:count]
-    v = [ ]
-    for entry in entries:
-        if debug:
-            print(entry.tagName)
-        # we may want content
-        content_text = ''
-        if words:
-            content_text = truncate_words(get_element_text(entry, 'content'), words)
-        # we want the title and href
-        v.append(
-            {
-                'id': get_element_text(entry, 'id'),
-                'title': get_element_text(entry, 'title'),
-                'content': content_text
-            }
-        )
-    if debug:
-        for s in v:
-            print(s)
-
-    return [ Blog(href=s['id'],
-                  title=s['title'],
-                  content=s['content'])
-             for s in v]
-
-
-# token is read from a hidden location. (Local handling still needs discussion.)
-def twitter_auth():
-    authtokens = os.path.join(os.path.expanduser('~'), '.authtokens')
-    try:
-        for line in open(authtokens).readlines():
-            if line.startswith('twitter:'):
-                token = line.strip().split(':')[1]
-                # do not print or display token as it is a secret
-                return token
-    except Exception:
-        traceback.print_exc()
-    return None
-
-
-# retrieve from twitter
-def connect_to_endpoint(url, headers):
-    response = requests.request('GET', url, headers=headers)
-    if response.status_code != 200:
-        raise Exception(response.status_code, response.text)
-    return response.json()
-
-
-# retrieve the last count recent tweets from the handle.
-def process_twitter(handle, count, debug):
-    if debug:
-        print(f'-----\ntwitter feed: {handle}')
-    bearer_token = twitter_auth()
-    if not bearer_token:
-        return sequence_list('twitter', [{
-            'text': 'To retrieve tweets, supply a valid twitter bearer token in ~/.authtokens'
-        }])
-    # do not print or display bearer_token as it is a secret
-    query = f'from:{handle}'
-    tweet_fields = 'tweet.fields=author_id'
-    url = f'https://api.twitter.com/2/tweets/search/recent?query={query}&{tweet_fields}'
-    headers = {'Authorization': f'Bearer {bearer_token}'}
-    load = connect_to_endpoint(url, headers)
-    reference = sequence_list('twitter', load['data'])
-    if load['meta']['result_count'] < count:
-        v = reference
-    else:
-        v = reference[:count]
-    return v
-
-
-# create sequence of sequences of ASF ECCN data.
-def process_eccn(fname, debug):
-    if debug:
-        print('-----\nECCN:', fname)
-    j = yaml.safe_load(open(fname))
-
-    # versions have zero or more controlled sources
-    def make_sources(sources):
-        return [ Source(href=s['href'],
-                        manufacturer=s['manufacturer'],
-                        why=s['why'])
-                 for s in sources]
-
-    # products have one or more versions
-    def make_versions(vsns):
-        return [ Version(version=v['version'],
-                         eccn=v['eccn'],
-                         source=make_sources(v.get('source', [ ])),
-                         )
-                 for v in sorted(vsns,
-                                 key=operator.itemgetter('version'))]
-
-    # projects have one or more products
-    def make_products(prods):
-        return [ Product(name=p['name'],
-                         versions=make_versions(p['versions']),
-                         )
-                 for p in sorted(prods,
-                                 key=operator.itemgetter('name'))]
-
-    # eccn matrix has one or more projects
-    return [ Project(name=proj['name'],
-                     href=proj['href'],
-                     contact=proj['contact'],
-                     product=make_products(proj['product']))
-             for proj in sorted(j['eccnmatrix'],
-                                key=operator.itemgetter('name'))]
-
-
-# object wrappers
-class wrapper:
-    def __init__(self, **kw):
-        vars(self).update(kw)
-
-
-# Named subclasses improve the names shown when failures occur.
-class Source(wrapper):
-    pass
-
-
-class Version(wrapper):
-    pass
-
-
-class Product(wrapper):
-    pass
-
-
-class Project(wrapper):
-    pass
-
-
-class Blog(wrapper):
-    pass
-
-
-class Distribution(wrapper):
-    pass
-
-
-# create metadata according to instructions.
-def config_read_data(pel_ob):
-    print('-----\nasfdata')
-
-    asf_data = pel_ob.settings.get('ASF_DATA')
-
-    if not asf_data:
-        print('This Pelican installation is not using ASF_DATA')
-        return
-
-    debug = asf_data['debug']
-
-    if debug:
-        for key in asf_data:
-            print(f'config: [{key}] = {asf_data[key]}')
-
-    # This must be present in ASF_DATA. It contains data for use
-    # by our plugins, and possibly where we load/inject data from
-    # other sources.
-    metadata = asf_data['metadata']
-
-    # Lift data from ASF_DATA['data'] into METADATA
-    if 'data' in asf_data:
-        if debug:
-            print(f'Processing {asf_data["data"]}')
-        config_data = read_config(asf_data['data'], debug)
-        for key in config_data:
-            # first check for data that is a singleton with special handling
-            if key == 'eccn':
-                # process eccn data
-                fname = config_data[key]['file']
-                metadata[key] = v = process_eccn(fname, debug)
-                if debug:
-                    print('ECCN V:', v)
-                continue
-
-            if key == 'twitter':
-                # process twitter data
-                # if we decide to support multiple twitter feeds, move this next to blog below
-                handle = config_data[key]['handle']
-                count = config_data[key]['count']
-                metadata[key] = v = process_twitter(handle, count, debug)
-                if debug:
-                    print('TWITTER V:', v)
-                continue
-
-            value = config_data[key]
-            if isinstance(value, dict):
-                # dictionaries may have multiple data structures that are processed with a sequence of actions
-                # into multiple sequences and dictionaries.
-                if debug:
-                    print(f'-----\n{key} creates one or more sequences')
-                    print(value)
-                # special cases that are multiple are processed first
-                if 'blog' in value:
-                    # process blog feed
-                    feed = config_data[key]['blog']
-                    count = config_data[key]['count']
-                    if 'content' in config_data[key].keys():
-                        words = config_data[key]['content']
-                    else:
-                        words = None
-                    metadata[key] = v = process_blog(feed, count, words, debug)
-                    if debug:
-                        print('BLOG V:', v)
-                    continue
-
-                elif 'release' in value:
-                    # retrieve active release distributions
-                    src = config_data[key]['src']
-                    revision = config_data[key]['revision']
-                    project = config_data[key]['release']
-                    keys, distributions = process_distributions(project, src, revision, debug)
-                    metadata[key] = v = distributions
-                    metadata[f"{key}-keys"] = keys
-                    metadata[f"{key}-project"] = project
-                    if debug:
-                        print('RELEASE V:', v)
-
-                elif 'url' in value:
-                    # process a url based data source
-                    load = url_data(value['url'], debug)
-                    process_load(metadata, value, load, debug)
-
-                elif 'file' in value:
-                    # process a file from within the site tree
-                    load = file_data(value['file'], debug)
-                    process_load(metadata, value, load, debug)
-
-                else:
-                    # should probably be an error but doesn't matter
-                    metadata[key] = value
-            else:
-                # simple metadata values - either an int or str
-                if debug:
-                    print(f'{key} = {value}')
-                metadata[key] = value
-
-    # display asfdata metadata or metadata type
-    print('-----')
-    for key in metadata:
-        if debug:
-            print(f'metadata[{key}] =')
-            print(metadata[key])
-            print('-----')
-        elif isinstance(metadata[key], str):
-            print(f'metadata[{key}] = "{metadata[key]}"')
-        elif isinstance(metadata[key], int):
-            print(f'metadata[{key}] = {metadata[key]}')
-        elif isinstance(metadata[key], list):
-            print(f'metadata[{key}] is a sequence.')
-        elif isinstance(metadata[key], dict):
-            print(f'metadata[{key}] is a dictionary.')
-        else:
-            keytype = type(metadata[key])
-            print(f'metadata[{key}] is a {keytype}')
-    print('-----')
-
-
-def tb_initialized(pel_ob):
-    """ Print any exception, before Pelican chews it into nothingness."""
-    try:
-        config_read_data(pel_ob)
-    except Exception:
-        print('-----', file=sys.stderr)
-        traceback.print_exc()
-        # exceptions here stop the build
-        raise
-
-
-def register():
-    # Hook the "initialized" signal, to load our custom data.
-    pelican.plugins.signals.initialized.connect(tb_initialized)
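The heart of asfdata is its sequence conversion: every mapping in the loaded YAML/JSON becomes an anonymous class whose attributes ezt templates can reference. A reduced sketch of the `sequence_dict` idea, using hypothetical committee data and omitting the `ezt.boolean` fixup:

```python
# Hypothetical data shaped like the committee info asfdata loads.
committees = {
    'httpd': {'name': 'HTTP Server', 'chair': 'Jane Doe'},
    'tomcat': {'name': 'Tomcat', 'chair': 'John Roe'},
}

sequence = []
for key, entry in committees.items():
    entry['key_id'] = key  # keep the dict key addressable as an attribute
    # dict -> object with attributes, the same type(seq, (), dict) trick as above
    sequence.append(type('committee', (), entry))

for item in sequence:
    print(item.key_id, item.name, item.chair)  # e.g. httpd HTTP Server Jane Doe
```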
diff --git a/theme/plugins/asfgenid.py b/theme/plugins/asfgenid.py
deleted file mode 100644
index d508675..0000000
--- a/theme/plugins/asfgenid.py
+++ /dev/null
@@ -1,410 +0,0 @@
-'''
-asfgenid
-===================================
-Generates heading IDs, element IDs, and permalinks.
-First find all specified IDs and classes; ensure unique IDs and permalinks.
-Next find all headings missing IDs; ensure unique IDs and permalinks.
-Generates a Table of Contents.
-'''
-
-# from __future__ import unicode_literals
-
-import sys
-import traceback
-import re
-import unicodedata
-
-from bs4 import BeautifulSoup, Comment
-
-import pelican.contents
-import pelican.plugins.signals
-
-'''
-Based on
-https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/headerid.py
-Which is BSD licensed, but is very much rewritten.
-'''
-
-ASF_GENID = {
-    'unsafe_tags': True,        # fix script, style, and iframe html that gfm filters as unsafe
-    'metadata': True,           # {{ metadata }} inclusion of data in the html.
-    'elements': True,           # {#id} and {.class} annotations.
-    'headings': True,           # add slugified id to headings missing id. Can be overridden by page metadata.
-    'headings_re': r'^h[1-6]',  # regex for which headings to check.
-    'permalinks': True,         # add permalinks to elements and headings when an id is added.
-    'toc': True,                # check for [TOC] and add a Table of Contents if present.
-    'toc_headers': r'h[1-6]',   # regex for which headings to include in the [TOC].
-    'tables': True,             # add class="table" for tables missing a class.
-    'debug': False
-}
-
-# Fixup tuples for HTML that GFM makes into text.
-FIXUP_UNSAFE = [
-    (re.compile(r'&lt;script'), '<script'),
-    (re.compile(r'&lt;/script'), '</script'),
-    (re.compile(r'&lt;style'), '<style'),
-    (re.compile(r'&lt;/style'), '</style'),
-    (re.compile(r'&lt;iframe'), '<iframe'),
-    (re.compile(r'&lt;/iframe'), '</iframe')
-]
-
-# Find {{ metadata }} inclusions
-METADATA_RE = re.compile(r'{{\s*(?P<meta>[-_:a-zA-Z0-9]+)\s*}}')
-
-# Find {#id} or {.class} elementid annotations
-ELEMENTID_RE = re.compile(r'(?:[ \t]*[{\[][ \t]*(?P<type>[#.])(?P<id>[-._:a-zA-Z0-9 ]+)[}\]])(\n|$)')
-
-# ID duplicates match
-IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
-
-# For permalinks
-LINK_CHAR = '¶'
-
-# strip permalink chars from headings for ToC
-PARA_MAP = {
-    ord(LINK_CHAR): None
-}
-
-# Find table tags - to check for ones without class attribute.
-TABLE_RE = re.compile(r'^table')
-
-
-# An item in a Table of Contents - from toc.py
-class HtmlTreeNode(object):
-    def __init__(self, parent, header, level, tag_id):
-        self.children = []
-        self.parent = parent
-        self.header = header
-        self.level = level
-        self.tag_id = tag_id
-
-    def add(self, new_header):
-        new_level = new_header.name
-        new_string = new_header.string
-        new_id = new_header.attrs.get('id')
-
-        if not new_string:
-            new_string = new_header.find_all(
-                text=lambda t: not isinstance(t, Comment),
-                recursive=True)
-            new_string = ''.join(new_string)
-        new_string = new_string.translate(PARA_MAP)
-
-        if self.level < new_level:
-            new_node = HtmlTreeNode(self, new_string, new_level, new_id)
-            self.children += [new_node]
-            return new_node, new_header
-        elif self.level == new_level:
-            new_node = HtmlTreeNode(self.parent, new_string, new_level, new_id)
-            self.parent.children += [new_node]
-            return new_node, new_header
-        elif self.level > new_level:
-            return self.parent.add(new_header)
-
-    def __str__(self):
-        ret = ''
-        if self.parent:
-            ret = "<a class='toc-href' href='#{0}' title='{1}'>{1}</a>".format(
-                self.tag_id, self.header)
-
-        if self.children:
-            ret += "<ul>{}</ul>".format('{}' * len(self.children)).format(
-                *self.children)
-
-        if self.parent:
-            ret = "<li>{}</li>".format(ret)
-
-        if not self.parent:
-            ret = "<div id='toc'>{}</div>".format(ret)
-
-        return ret
-
-
-# assure configuration
-def init_default_config(pelican):
-    from pelican.settings import DEFAULT_CONFIG
-
-    DEFAULT_CONFIG.setdefault('ASF_GENID', ASF_GENID)
-    if pelican:
-        pelican.settings.setdefault('ASF_GENID', ASF_GENID)
-
-
-# from Apache CMS markdown/extensions/headerid.py - slugify in the same way as the Apache CMS
-def slugify(value, separator):
-    """ Slugify a string, to make it URL friendly. """
-    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
-    value = re.sub('[^\\w\\s-]', '', value.decode('ascii')).strip().lower()
-    return re.sub('[%s\\s]+' % separator, separator, value)
-
-
-# Ensure an id is unique in a set of ids. Append '_1', '_2'... if not
-def unique(tag_id, ids):
-    while tag_id in ids or not tag_id:
-        m = IDCOUNT_RE.match(tag_id)
-        print(f'id="{tag_id}" is a duplicate')
-        if m:
-            tag_id = '%s_%d' % (m.group(1), int(m.group(2)) + 1)
-        else:
-            tag_id = '%s_%d' % (tag_id, 1)
-    ids.add(tag_id)
-    return tag_id
-
-
-# append a permalink
-def permalink(soup, mod_element):
-    new_tag = soup.new_tag('a', href='#' + mod_element['id'])
-    new_tag['class'] = 'headerlink'
-    new_tag['title'] = 'Permalink'
-    new_tag.string = LINK_CHAR
-    mod_element.append(new_tag)
-
-
-# fixup cmark content - note that this may be too hungry. It may need to occur later and skipped in codeblock and pre tags.
-def fixup_content(content):
-    text = content._content
-    modified = False
-    # Find messed up html
-    for regex, replace in FIXUP_UNSAFE:
-        m = regex.search(text)
-        if m:
-            modified = True
-            text = re.sub(regex, replace, text)
-    if modified:
-        content._content = text
-
-
-# expand metadata found in {{ key }}
-def expand_metadata(tag, metadata):
-    this_string = str(tag.string)
-    m = 1
-    modified = False
-    while m:
-        m = METADATA_RE.search(this_string)
-        if m:
-            this_data = m.group(1).strip()
-            format_string = '{{{0}}}'.format(this_data)
-            try:
-                new_string = format_string.format(**metadata)
-                print(f'{{{{{m.group(1)}}}}} -> {new_string}')
-            except Exception:
-                # the data expression was not found
-                print(f'{{{{{m.group(1)}}}}} is not found')
-                new_string = format_string
-            # replace the first pattern with the new_string
-            this_string = re.sub(METADATA_RE, new_string, this_string, count=1)
-            modified = True
-    if modified:
-        tag.string.replace_with(this_string)
-
-
-# do elementid transformation for {#id} and {.class} attribute annotations.
-def elementid_transform(ids, soup, tag, permalinks, perma_set, debug):
-    tagnav = tag.parent
-    this_string = str(tag.string)
-    if debug:
-        print(f'name = {tagnav.name}, string = {this_string}')
-    if tagnav.name not in ['[document]', 'code', 'pre']:
-        m = ELEMENTID_RE.search(tag.string)
-        if m:
-            # this replacement could be better: it truncates and likely drops additional annotations
-            tag.string.replace_with(this_string[:m.start()])
-            if m.group('type') == '#':
-                # id attribute annotation
-                tagnav['id'] = unique(m.group('id'), ids)
-                if permalinks:
-                    permalink(soup, tagnav)
-                    unique(tagnav['id'], perma_set)
-                if debug:
-                    print(f'# insertion {tagnav}')
-            else:
-                # class attribute annotation (regex only recognizes the two types)
-                tagnav['class'] = m.group('id')
-                if debug:
-                    print(f'Class {tag.name} : {tagnav["class"]}')
-
-
-# generate id for a heading
-def headingid_transform(ids, soup, tag, permalinks, perma_set):
-    new_string = tag.string
-    if not new_string:
-        # roll up strings if no immediate string
-        new_string = tag.find_all(
-            text=lambda t: not isinstance(t, Comment),
-            recursive=True)
-        new_string = ''.join(new_string)
-
-    # no id present: create one from the text
-    new_id = slugify(new_string, '-')
-    tag['id'] = unique(new_id, ids)
-    if permalinks:
-        permalink(soup, tag)
-        # inform if there is a duplicate permalink
-        unique(tag['id'], perma_set)
-
-
-# generate table of contents from headings after [TOC] content
-def generate_toc(content, tags, title, toc_headers):
-    settoc = False
-    tree = node = HtmlTreeNode(None, title, 'h0', '')
-    # find the last [TOC]
-    taglast = tags[0]
-    for tag in tags:
-        taglast = tag
-    # find all headings after the final [TOC]
-    heading_re = re.compile(toc_headers)
-    for header in taglast.findAllNext(heading_re):
-        # we have heading content for the ToC
-        settoc = True
-        # add the heading.
-        node, _new_header = node.add(header)
-    # convert the ToC to Beautiful Soup
-    tree_soup = ''
-    if settoc:
-        print('  ToC')
-        # convert the HtmlTreeNode into Beautiful Soup
-        tree_string = '{}'.format(tree)
-        tree_soup = BeautifulSoup(tree_string, 'html.parser')
-        # Make the ToC available to the theme's template
-        content.toc = tree_soup.decode(formatter='html')
-    # replace the first [TOC] with the generated table of contents
-    for tag in tags:
-        tag.replaceWith(tree_soup)
-        # replace additional [TOC] with nothing
-        tree_soup = ''
-
-
-# create breadcrumb html
-def make_breadcrumbs(rel_source_path, title):
-    parts = rel_source_path.split('/')
-    url = '/'
-    crumbs = []
-    crumbs.append('<a href="/">Home</a>&nbsp;&raquo;&nbsp;')
-    # don't process the filename part
-    last = len(parts) - 1
-    for i in range(last):
-        url = f"{url}{parts[i]}/"
-        p = parts[i].capitalize()
-        crumbs.append(f'<a href="{url}">{p}</a>&nbsp;&raquo;&nbsp;')
-    crumbs.append(f'<a href="#">{title}</a>')
-    return ''.join(crumbs)
-
-
-# add the asfdata metadata into GFM content.
-def add_data(content):
-    """ Mix in ASF data as metadata """
-
-    # if the reader is 'asf' then the asf metadata is already in place during asfreader plugin.
-    if content.metadata.get('reader') != 'asf':
-        asf_metadata = content.settings.get('ASF_DATA', { }).get('metadata')
-        if asf_metadata:
-            content.metadata.update(asf_metadata)
-
-
-# main worker transforming the html
-def generate_id(content):
-    if isinstance(content, pelican.contents.Static):
-        return
-
-    # get plugin settings
-    asf_genid = content.settings['ASF_GENID']
-    # asf_headings setting may be overridden
-    asf_headings = content.metadata.get('asf_headings', str(asf_genid['headings']))
-
-    # show active plugins
-    if asf_genid['debug']:
-        print('asfgenid:\nshow plugins in case one is processing before this one')
-        for name in content.settings['PLUGINS']:
-            print(f'plugin: {name}')
-
-    # track the id tags
-    ids = set()
-    # track permalinks
-    permalinks = set()
-
-    # step 1 - fixup html that cmark marks unsafe - move to later?
-    if asf_genid['unsafe_tags']:
-        fixup_content(content)
-
-    # step 2 - prepare for genid processes
-    # parse html content into BeautifulSoup4
-    soup = BeautifulSoup(content._content, 'html.parser')
-    # page title
-    title = content.metadata.get('title', 'Title')
-    # assure relative source path is in the metadata
-    content.metadata['relative_source_path'] = rel_source_path = content.relative_source_path
-    # create breadcrumb html
-    content.metadata['breadcrumbs'] = breadcrumbs = make_breadcrumbs(rel_source_path, title)
-    # display output path and title
-    print(f'{content.relative_source_path} - {title}')
-    # if debug display breadcrumb html
-    if asf_genid['debug']:
-        print(f'    {breadcrumbs}')
-    # enhance metadata if done by asfreader
-    add_data(content)
-
-    # step 3 - metadata expansion
-    if asf_genid['metadata']:
-        if asf_genid['debug']:
-            print(f'metadata expansion: {content.relative_source_path}')
-        for tag in soup.findAll(string=METADATA_RE):
-            expand_metadata(tag, content.metadata)
-
-    # step 4 - find all id attributes already present
-    for tag in soup.findAll(id=True):
-        unique(tag['id'], ids)
-        # don't change existing ids
-
-    # step 5 - find all {#id} and {.class} text and assign attributes
-    if asf_genid['elements']:
-        if asf_genid['debug']:
-            print(f'elementid: {content.relative_source_path}')
-        for tag in soup.findAll(string=ELEMENTID_RE):
-            elementid_transform(ids, soup, tag, asf_genid['permalinks'], permalinks, asf_genid['debug'])
-
-    # step 6 - find all headings w/o ids already present or assigned with {#id} text
-    if asf_headings == 'True':
-        if asf_genid['debug']:
-            print(f'headings: {content.relative_source_path}')
-        # Find heading tags
-        HEADING_RE = re.compile(asf_genid['headings_re'])
-        for tag in soup.findAll(HEADING_RE, id=False):
-            headingid_transform(ids, soup, tag, asf_genid['permalinks'], permalinks)
-
-    # step 7 - find all tables without class
-    if asf_genid['tables']:
-        if asf_genid['debug']:
-            print(f'tables: {content.relative_source_path}')
-        for tag in soup.findAll(TABLE_RE, class_=False):
-            tag['class'] = 'table'
-
-    # step 8 - find TOC tag and generate Table of Contents
-    if asf_genid['toc']:
-        tags = soup('p', text='[TOC]')
-        if tags:
-            generate_toc(content, tags, title, asf_genid['toc_headers'])
-
-    # step 9 - reset the html content
-    content._content = soup.decode(formatter='html')
-
-    # step 10 - output all of the permalinks created
-    for tag in permalinks:
-        print(f'    #{tag}')
-
-
-def tb_connect(pel_ob):
-    """Print any exception, before Pelican chews it into nothingness."""
-    try:
-        generate_id(pel_ob)
-    except Exception:
-        print('-----', file=sys.stderr)
-        print('FATAL: %s' % (pel_ob.relative_source_path), file=sys.stderr)
-        traceback.print_exc()
-        # if we have errors in this module then we want to quit to avoid erasing the site
-        sys.exit(4)
-
-
-def register():
-    pelican.plugins.signals.initialized.connect(init_default_config)
-
-
-pelican.plugins.signals.content_object_init.connect(tb_connect)
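asfgenid's heading-id machinery reduces to two steps: slugify the heading text, then disambiguate collisions with an `_N` suffix. A self-contained sketch of that behavior, reusing the plugin's slugify approach and duplicate regex on hypothetical headings:

```python
import re
import unicodedata

IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')

def slugify(value, separator='-'):
    # ASCII-fold, drop punctuation, collapse whitespace to the separator.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    value = re.sub(r'[^\w\s-]', '', value.decode('ascii')).strip().lower()
    return re.sub(r'[%s\s]+' % separator, separator, value)

ids = set()
for heading in ['Getting Started', 'Getting Started', 'FAQ & Tips']:
    tag_id = slugify(heading)
    while tag_id in ids:
        m = IDCOUNT_RE.match(tag_id)
        tag_id = '%s_%d' % (m.group(1), int(m.group(2)) + 1) if m else tag_id + '_1'
    ids.add(tag_id)
    print(tag_id)  # getting-started, getting-started_1, faq-tips
```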
diff --git a/theme/plugins/asfreader.py b/theme/plugins/asfreader.py
deleted file mode 100644
index 8d26d15..0000000
--- a/theme/plugins/asfreader.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/python -B
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# asfreader.py -- Pelican plugin that processes ezt template Markdown through ezt and then GitHub Flavored Markdown.
-#
-
-import sys
-import io
-import os
-import traceback
-
-import re
-import ezt
-
-import pelican.plugins.signals
-import pelican.readers
-import pelican.settings
-
-GFMReader = sys.modules['pelican-gfm.gfm'].GFMReader
-
-METADATA_RE = re.compile(r'\[{\s*(?P<meta>[-._:a-zA-Z0-9\[\]]+)\s*}\]')
-
-
-class ASFTemplateReader(ezt.Reader):
-    """Enables inserts relative to the template we loaded."""
-
-    def __init__(self, source_path, text):
-        self.source_dir, self.fname = os.path.split(source_path)
-        self.text = text
-
-    def read_other(self, relative):
-        return ezt._FileReader(os.path.join(self.source_dir, relative))
-
-    def filename(self):
-        return self.fname
-
-
-class ASFReader(GFMReader):
-    """GFM-flavored Reader for the Pelican system that adds ASF data and ezt
-    generation prior to processing the GFM
-    """
-
-    def add_data(self, text, metadata):
-        "Mix in ASF data as metadata"
-
-        asf_metadata = self.settings.get('ASF_DATA', { }).get('metadata')
-        if asf_metadata:
-            metadata.update(asf_metadata)
-            # insert any direct references
-            m = 1
-            while m:
-                m = METADATA_RE.search(text)
-                if m:
-                    this_data = m.group(1).strip()
-                    format_string = '{{{0}}}'.format(this_data)
-                    try:
-                        new_string = format_string.format(**metadata)
-                        print(f'{{{{{m.group(1)}}}}} -> {new_string}')
-                    except Exception:
-                        # the data expression was not found
-                        new_string = format_string
-                        print(f'{{{{{m.group(1)}}}}} is not found')
-                    text = re.sub(METADATA_RE, new_string, text, count=1)
-        return text, metadata
-
-    def read(self, source_path):
-        "Read metadata and content, process content as ezt template, then render into HTML."
-        try:
-            # read content with embedded ezt - use GFMReader
-            text, metadata = super().read_source(source_path)
-            assert text
-            assert metadata
-            # supplement metadata with ASFData if available
-            text, metadata = self.add_data(text, metadata)
-            # prepare text as an ezt template
-            # compress_whitespace=0 is required as blank lines and indentation have meaning in markdown.
-            template = ezt.Template(compress_whitespace=0)
-            reader = ASFTemplateReader(source_path, text)
-            template.parse(reader, base_format=ezt.FORMAT_HTML)
-            assert template
-            # generate content from ezt template with metadata
-            fp = io.StringIO()
-            template.generate(fp, metadata)
-            # Render the markdown into HTML
-            content = super().render(fp.getvalue().encode('utf-8')).decode('utf-8')
-            assert content
-        except Exception:
-            print('-----', file=sys.stderr)
-            print('ERROR: %s' % (source_path), file=sys.stderr)
-            traceback.print_exc()
-            raise
-
-        return content, metadata
-
-
-# The following are required; otherwise ezmd files are not read and are instead treated as static.
-# For direct subclasses of BaseReader like GFMReader the following two
-# callables are optional if the class includes enabled=True and file_extensions.
-def add_readers(readers):
-    readers.reader_classes['ezmd'] = ASFReader
-
-
-def register():
-    pelican.plugins.signals.readers_init.connect(add_readers)
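The ezt pass is what turns an .ezmd file into plain Markdown before GFM rendering. A minimal illustration of that stage, assuming the ezt package used above is installed; the template text and metadata here are hypothetical:

```python
import io
import ezt

# Hypothetical .ezmd fragment: ezt directives over asfdata-style metadata.
text = (
    'Apache has [num] committees:\n'
    '[for committees]- [committees.name]\n'
    '[end]'
)
metadata = {
    'num': '2',
    'committees': [type('committee', (), {'name': n})
                   for n in ('HTTP Server', 'Tomcat')],
}

# compress_whitespace=0, as in ASFReader: blank lines and indentation
# are significant in Markdown and must survive the template pass.
template = ezt.Template(compress_whitespace=0)
template.parse(text, base_format=ezt.FORMAT_HTML)

fp = io.StringIO()
template.generate(fp, metadata)
print(fp.getvalue())  # Markdown, ready for the GFM renderer
```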
diff --git a/theme/plugins/asfshell.py b/theme/plugins/asfshell.py
deleted file mode 100644
index 57101e8..0000000
--- a/theme/plugins/asfshell.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/python -B
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# asfshell.py - Pelican plugin that runs shell scripts during initialization
-#
-
-import sys
-import subprocess
-import shlex
-import traceback
-
-import pelican.plugins.signals
-import pelican.settings
-
-
-# open a subprocess
-def os_run(args):
-    return subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True)
-
-
-# run the configured shell commands
-def run_shell(pel_ob):
-    asf_shell = pel_ob.settings.get('ASF_SHELL')
-    if asf_shell:
-        print('-----\nasfshell')
-        for command in asf_shell:
-            print(f'-----\n{command}')
-            args = shlex.split(command)
-            print(args)
-            with os_run(args) as s:
-                for line in s.stdout:
-                    line = line.strip()
-                    print(f'{line}')
-
-
-def tb_initialized(pel_ob):
-    """ Print any exception, before Pelican chews it into nothingness."""
-    try:
-        run_shell(pel_ob)
-    except Exception:
-        print('-----', file=sys.stderr)
-        traceback.print_exc()
-        # exceptions here stop the build
-        raise
-
-
-def register():
-    pelican.plugins.signals.initialized.connect(tb_initialized)
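asfshell's hook boils down to shlex-splitting each configured command and streaming its stdout. A standalone sketch, with a harmless echo command substituted for the site's `/bin/bash shell.sh`:

```python
import shlex
import subprocess

# A harmless stand-in for '/bin/bash shell.sh' from ASF_SHELL.
command = 'echo data fetch complete'
args = shlex.split(command)  # ['echo', 'data', 'fetch', 'complete']

# universal_newlines=True gives text-mode stdout, so lines iterate as str.
with subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True) as s:
    for line in s.stdout:
        print(line.strip())
```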