Delete /tools/dev/iz. It was used to extract issue handling statistics "for
management" in the tigris.org days. We have not used IssueZilla/BugZilla for
ages so let's get rid of some dead code.

Discussed: https://lists.apache.org/thread/5fkwrobtygjz4d9po45d8hq5kbmjnqqt


git-svn-id: https://svn.apache.org/repos/asf/subversion/trunk@1913556 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/tools/dev/iz/defect.dem b/tools/dev/iz/defect.dem
deleted file mode 100644
index 7756b7c..0000000
--- a/tools/dev/iz/defect.dem
+++ /dev/null
@@ -1,6 +0,0 @@
-set title "Subversion DEFECT Activity"
-set boxwidth 0.5
-set data style lines
-set key 10, 60
-plot "/tmp/points.found.DEFECT" title "found" with boxes, "/tmp/points.fixed.DEFECT" title "fixed" with boxes, "/tmp/points.avg.DEFECT" title "moving avg", "/tmp/points.open.DEFECT" title "open"
-pause -1 "Hit return to continue"
diff --git a/tools/dev/iz/ff2csv.command b/tools/dev/iz/ff2csv.command
deleted file mode 100755
index 6826e34..0000000
--- a/tools/dev/iz/ff2csv.command
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-
-# MacOS X do-hickie to run ff2csv.py, with parameters, by double-click.
-
-
-flags="hq"
-Usage () {
-    args="$*"
-    if [[ -n "$args" ]] ; then
-        echo >&2 "$args"
-    fi
-    echo >&2 "Usage: $0 [-$flags] [querysetfile [csvfile]]
-Run ff2csv.py, fetching and summarizing SVN bug status."
-}
-while getopts $flags flag; do
-    case "$flag" in
-        h|q) Usage; exit 0;;
-    esac
-done
-
-# we want to run in the same folder as this script, not
-# the users home folder
-cd `dirname $0`
-
-
-date=`date +%m%d`
-./ff2csv.py ${1:-query-set-1-$date.tsv} ${2:-core-history-$date.csv}
diff --git a/tools/dev/iz/ff2csv.py b/tools/dev/iz/ff2csv.py
deleted file mode 100755
index dca127e..0000000
--- a/tools/dev/iz/ff2csv.py
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# -*- Python -*-
-"""Transform find-fix.py output into Excellable csv."""
-
-__date__ = "Time-stamp: <2003-10-16 13:26:27 jrepenning>"[13:30]
-__author__ = "Jack Repenning <jrepenning@collab.net>"
-
-import getopt
-try:
-  my_getopt = getopt.gnu_getopt
-except AttributeError:
-  my_getopt = getopt.getopt
-import inspect
-import os
-import os.path
-import pydoc
-import re
-import shutil
-import string
-import sys
-import time
-
-# Long options and their usage strings; "=" means it takes an argument.
-# To get a list suitable for getopt, just do
-#
-#   [x[0] for x in long_opts]
-#
-# Make sure to sacrifice a lamb to Guido for each element of the list.
-long_opts = [
-    ["doc",             """Optional, print pydocs."""],
-    ["help",            """Optional, print usage (this text)."""],
-    ["verbose",         """Optional, print more progress messages."""],
-    ]
-
-help    = 0
-verbose = 0
-me = os.path.basename(sys.argv[0])
-
-DATA_FILE = "http://subversion.tigris.org/iz-data/query-set-1.tsv"
-
-def main():
-    """Run find-fix.py with arguments du jour for drawing pretty
-manager-speak pictures."""
-
-    global verbose
-
-    try:
-        opts, args = my_getopt(sys.argv[1:], "", [x[0] for x in long_opts])
-    except getopt.GetoptError as e:
-        print("Error: %s" % e.msg)
-        shortusage()
-        print(me + " --help for options.")
-        sys.exit(1)
-
-    for opt, arg in opts:
-        if opt == "--help":
-            usage()
-            sys.exit(0)
-        elif opt == "--verbose":
-            verbose = 1
-        elif opt == "--doc":
-            pydoc.doc(pydoc.importfile(sys.argv[0]))
-            sys.exit(0)
-
-    # do something fruitful with your life
-    if len(args) == 0:
-        args = ["query-set-1.tsv", "core-history.csv"]
-        print(("ff2csv %s %s" % args))
-
-    if len(args) != 2:
-        print("%s: Wrong number of args." % me)
-        shortusage()
-        sys.exit(1)
-
-    if os.system("curl " + DATA_FILE + "> " + args[0]):
-        os.system("wget " + DATA_FILE)
-
-    outfile = open(args[1], "w")
-    outfile.write("Date,found,fixed,inval,dup,other,remain\n")
-
-    totalsre = re.compile("totals:.*found= +([0-9]+) +"
-                          "fixed= +([0-9]+) +"
-                          "inval= +([0-9]+) +"
-                          "dup= +([0-9]+) +"
-                          "other= +([0-9]+) +"
-                          "remain= *([0-9]+)")
-    for year in ("2001", "2002", "2003", "2004"):
-        for month in ("01", "02", "03", "04", "05", "06", "07", "08",
-                      "09", "10", "11", "12"):
-            for dayrange in (("01", "08"),
-                             ("08", "15"),
-                             ("15", "22"),
-                             ("22", "28")):
-                if verbose:
-                    print("searching %s-%s-%s to %s" % (year,
-                                                        month,
-                                                        dayrange[0],
-                                                        dayrange[1]))
-                ffpy = os.popen("python ./find-fix.py --m=beta "
-                                "%s %s-%s-%s %s-%s-%s"
-                                % (args[0],
-                                   year, month, dayrange[0],
-                                   year, month, dayrange[1]))
-                if verbose:
-                    print("ffpy: %s" % ffpy)
-
-                line = ffpy.readline()
-                if verbose:
-                    print("initial line is: %s" % line)
-                matches = totalsre.search(line)
-                if verbose:
-                    print("initial match is: %s" % matches)
-                while line and not matches:
-                    line = ffpy.readline()
-                    if verbose:
-                        print("%s: read line '%s'" % (me, line))
-                    matches = totalsre.search(line)
-                    if verbose:
-                        print("subsequent line is: %s" % line)
-
-                ffpy.close()
-
-                if verbose:
-                    print("line is %s" % line)
-
-                if matches.group(1) != "0" \
-                   or matches.group(2) != "0" \
-                   or matches.group(3) != "0" \
-                   or matches.group(4) != "0" \
-                   or matches.group(5) != "0":
-
-                    outfile.write("%s-%s-%s,%s,%s,%s,%s,%s,%s\n"
-                                  % (year, month, dayrange[1],
-                                     matches.group(1),
-                                     matches.group(2),
-                                     matches.group(3),
-                                     matches.group(4),
-                                     matches.group(5),
-                                     matches.group(6),
-                                     ))
-                elif matches.group(6) != "0":
-                    # quit at first nothing-done week
-                    # allows slop in loop controls
-                    break
-    outfile.close()
-
-
-def shortusage():
-  "Print one-line usage summary."
-  print("%s - %s" % (me, pydoc.synopsis(sys.argv[0])))
-
-def usage():
-  "Print multi-line usage tome."
-  shortusage()
-  print('''%s [opts] [queryfile [outfile]]
-Option keywords may be abbreviated to any unique prefix.
-Option order is not important.
-Most options require "=xxx" arguments:''' % me)
-  for x in long_opts:
-      padding_limit = 18
-      if x[0][-1:] == '=':
-          sys.stdout.write("   --%s " % x[0][:-1])
-          padding_limit = 19
-      else:
-          sys.stdout.write("   --%s " % x[0])
-      print("%s %s" % ((' ' * (padding_limit - len(x[0]))), x[1]))
-
-if __name__ == "__main__":
-  main()
diff --git a/tools/dev/iz/find-fix.py b/tools/dev/iz/find-fix.py
deleted file mode 100755
index 8761b8e..0000000
--- a/tools/dev/iz/find-fix.py
+++ /dev/null
@@ -1,454 +0,0 @@
-#!/usr/bin/env python
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# -*- Python -*-
-"""find-fix.py: produce a find/fix report for Subversion's IZ database
-
-For simple text summary:
-       find-fix.py query-set-1.tsv YYYY-MM-DD YYYY-MM-DD
-Statistics will be printed for bugs found or fixed within the
-time frame.
-
-For gnuplot presentation:
-       find-fix.py query-set-1.tsv outfile
-Gnuplot provides its own way to select date ranges.
-
-Either way, get a query-set-1.tsv from:
-  http://subversion.tigris.org/iz-data/query-set-1.tsv  (updated nightly)
-See http://subversion.tigris.org/iz-data/README for more info on that file.
-
-For more usage info on this script:
-        find-fix.py --help
-"""
-
-_version = "$Revision:"
-
-#
-# This can be run over the data file found at:
-#   http://subversion.tigris.org/iz-data/query-set-1.tsv
-#
-
-import getopt
-try:
-  my_getopt = getopt.gnu_getopt
-except AttributeError:
-  my_getopt = getopt.getopt
-import operator
-import os
-import os.path
-import pydoc
-import re
-try:
-  # Python >=2.6
-  from functools import reduce
-except ImportError:
-  # Python <2.6
-  pass
-import sys
-import time
-
-me = os.path.basename(sys.argv[0])
-
-# Long options and their usage strings; "=" means it takes an argument.
-# To get a list suitable for getopt, just do
-#
-#   [x[0] for x in long_opts]
-#
-# Make sure to sacrifice a lamb to Guido for each element of the list.
-long_opts = [
-  ["milestones=",      """Optional, milestones NOT to report on
-        (one or more of Beta, 1.0, Post-1.0, cvs2svn-1.0, cvs2svn-opt,
-        inapplicable)"""],
-  ["update",          """Optional, update the statistics first."""],
-  ["doc",             """Optional, print pydocs."""],
-  ["help",            """Optional, print usage (this text)."""],
-  ["verbose",         """Optional, print more progress messages."""],
-  ]
-
-help    = 0
-verbose = 0
-update  = 0
-
-DATA_FILE = "http://subversion.tigris.org/iz-data/query-set-1.tsv"
-ONE_WEEK = 7 * 24 * 60 * 60
-
-_types = []
-_milestone_filter = []
-
-noncore_milestone_filter = [
-  'Post-1.0',
-  '1.1',
-  'cvs2svn-1.0',
-  'cvs2svn-opt',
-  'inapplicable',
-  'no milestone',
-  ]
-
-one_point_oh_milestone_filter = noncore_milestone_filter + []
-
-beta_milestone_filter = one_point_oh_milestone_filter + ['1.0']
-
-
-_types = [
-  'DEFECT',
-  'TASK',
-  'FEATURE',
-  'ENHANCEMENT',
-  'PATCH',
-  ]
-
-
-def main():
-  """Report bug find/fix rate statistics for Subversion."""
-
-  global verbose
-  global update
-  global _types
-  global _milestone_filter
-  global noncore_milestone_filter
-
-  try:
-      opts, args = my_getopt(sys.argv[1:], "", [x[0] for x in long_opts])
-  except getopt.GetoptError as e:
-      sys.stderr.write("Error: %s\n" % e.msg)
-      shortusage()
-      sys.stderr.write("%s --help for options.\n" % me)
-      sys.exit(1)
-
-  for opt, arg in opts:
-    if opt == "--help":
-      usage()
-      sys.exit(0)
-    elif opt == "--verbose":
-      verbose = 1
-    elif opt == "--milestones":
-      for mstone in arg.split(","):
-        if mstone == "noncore":
-          _milestone_filter = noncore_milestone_filter
-        elif mstone == "beta":
-          _milestone_filter = beta_milestone_filter
-        elif mstone == "one":
-          _milestone_filter = one_point_oh_milestone_filter
-        elif mstone[0] == '-':
-          if mstone[1:] in _milestone_filter:
-            spot = _milestone_filter.index(mstone[1:])
-            _milestone_filter = _milestone_filter[:spot] \
-                                + _milestone_filter[(spot+1):]
-        else:
-          _milestone_filter += [mstone]
-
-    elif opt == "--update":
-      update = 1
-    elif opt == "--doc":
-      pydoc.doc(pydoc.importfile(sys.argv[0]))
-      sys.exit(0)
-
-  if len(_milestone_filter) == 0:
-    _milestone_filter = noncore_milestone_filter
-
-  if verbose:
-    sys.stderr.write("%s: Filtering out milestones %s.\n"
-                     % (me, ", ".join(_milestone_filter)))
-
-  if len(args) == 2:
-    if verbose:
-      sys.stderr.write("%s: Generating gnuplot data.\n" % me)
-    if update:
-      if verbose:
-        sys.stderr.write("%s: Updating %s from %s.\n" % (me, args[0], DATA_FILE))
-      if os.system("curl " + DATA_FILE + "> " + args[0]):
-        os.system("wget " + DATA_FILE)
-    plot(args[0], args[1])
-
-  elif len(args) == 3:
-    if verbose:
-      sys.stderr.write("%s: Generating summary from %s to %s.\n"
-                       % (me, args[1], args[2]))
-    if update:
-      if verbose:
-        sys.stderr.write("%s: Updating %s from %s.\n" % (me, args[0], DATA_FILE))
-      if os.system("curl " + DATA_FILE + "> " + args[0]):
-        os.system("wget " + DATA_FILE)
-
-    try:
-      t_start = parse_time(args[1] + " 00:00:00")
-    except ValueError:
-      sys.stderr.write('%s: ERROR: bad time value: %s\n' % (me, args[1]))
-      sys.exit(1)
-
-    try:
-      t_end = parse_time(args[2] + " 00:00:00")
-    except ValueError:
-      sys.stderr.write('%s: ERROR: bad time value: %s\n' % (me, args[2]))
-      sys.exit(1)
-
-    summary(args[0], t_start, t_end)
-  else:
-    usage()
-
-  sys.exit(0)
-
-
-def summary(datafile, d_start, d_end):
-  "Prints a summary of activity within a specified date range."
-
-  data = load_data(datafile)
-
-  # activity during the requested period
-  found, fixed, inval, dup, other = extract(data, 1, d_start, d_end)
-
-  # activity from the beginning of time to the end of the request
-  # used to compute remaining
-  # XXX It would be faster to change extract to collect this in one
-  # pass.  But we don't presently have enough data, nor use this
-  # enough, to justify that rework.
-  fromzerofound, fromzerofixed, fromzeroinval, fromzerodup, fromzeroother \
-              = extract(data, 1, 0, d_end)
-
-  alltypes_found = alltypes_fixed = alltypes_inval = alltypes_dup \
-                   = alltypes_other = alltypes_rem = 0
-  for t in _types:
-    fromzerorem_t = fromzerofound[t]\
-                    - (fromzerofixed[t] + fromzeroinval[t] + fromzerodup[t]
-                       + fromzeroother[t])
-    print('%12s: found=%3d  fixed=%3d  inval=%3d  dup=%3d  ' \
-          'other=%3d  remain=%3d' \
-          % (t, found[t], fixed[t], inval[t], dup[t], other[t], fromzerorem_t))
-    alltypes_found = alltypes_found + found[t]
-    alltypes_fixed = alltypes_fixed + fixed[t]
-    alltypes_inval = alltypes_inval + inval[t]
-    alltypes_dup   = alltypes_dup   + dup[t]
-    alltypes_other = alltypes_other + other[t]
-    alltypes_rem   = alltypes_rem + fromzerorem_t
-
-  print('-' * 77)
-  print('%12s: found=%3d  fixed=%3d  inval=%3d  dup=%3d  ' \
-        'other=%3d  remain=%3d' \
-        % ('totals', alltypes_found, alltypes_fixed, alltypes_inval,
-           alltypes_dup, alltypes_other, alltypes_rem))
-  # print '%12s  find/fix ratio: %g%%' \
-  #      % (" "*12, (alltypes_found*100.0/(alltypes_fixed
-  #         + alltypes_inval + alltypes_dup + alltypes_other)))
-
-
-def plot(datafile, outbase):
-  "Generates data files intended for use by gnuplot."
-
-  global _types
-
-  data = load_data(datafile)
-
-  t_min = 1L<<32
-  for issue in data:
-    if issue.created < t_min:
-      t_min = issue.created
-
-  # break the time up into a tuple, then back up to Sunday
-  t_start = time.localtime(t_min)
-  t_start = time.mktime((t_start[0], t_start[1], t_start[2] - t_start[6] - 1,
-                         0, 0, 0, 0, 0, 0))
-
-  plots = { }
-  for t in _types:
-    # for each issue type, we will record per-week stats, compute a moving
-    # average of the find/fix delta, and track the number of open issues
-    plots[t] = [ [ ], MovingAverage(), 0 ]
-
-  week = 0
-  for date in range(t_start, time.time(), ONE_WEEK):
-    ### this is quite inefficient, as we could just sort by date, but
-    ### I'm being lazy
-    found, fixed = extract(data, None, date, date + ONE_WEEK - 1)
-
-    for t in _types:
-      per_week, avg, open_issues = plots[t]
-      delta = found[t] - fixed[t]
-      per_week.append((week, date,
-                       found[t], -fixed[t], avg.add(delta), open_issues))
-      plots[t][2] = open_issues + delta
-
-    week = week + 1
-
-  for t in _types:
-    week_data = plots[t][0]
-    write_file(week_data, outbase, t, 'found', 2)
-    write_file(week_data, outbase, t, 'fixed', 3)
-    write_file(week_data, outbase, t, 'avg', 4)
-    write_file(week_data, outbase, t, 'open', 5)
-
-def write_file(week_data, base, type, tag, idx):
-  f = open('%s.%s.%s' % (base, tag, type), 'w')
-  for info in week_data:
-    f.write('%s %s # %s\n' % (info[0], info[idx], time.ctime(info[1])))
-
-
-class MovingAverage:
-  "Helper class to compute moving averages."
-  def __init__(self, n=4):
-    self.n = n
-    self.data = [ 0 ] * n
-  def add(self, value):
-    self.data.pop(0)
-    self.data.append(float(value) / self.n)
-    return self.avg()
-  def avg(self):
-    return reduce(operator.add, self.data)
-
-
-def extract(data, details, d_start, d_end):
-  """Extract found/fixed counts for each issue type within the data range.
-
-  If DETAILS is false, then return two dictionaries:
-
-    found, fixed
-
-  ...each mapping issue types to the number of issues of that type
-  found or fixed respectively.
-
-  If DETAILS is true, return five dictionaries:
-
-    found, fixed, invalid, duplicate, other
-
-  The first is still the found issues, but the other four break down
-  the resolution into 'FIXED', 'INVALID', 'DUPLICATE', and a grab-bag
-  category for 'WORKSFORME', 'LATER', 'REMIND', and 'WONTFIX'."""
-
-  global _types
-  global _milestone_filter
-
-  found = { }
-  fixed = { }
-  invalid = { }
-  duplicate = { }
-  other = { }  # "WORKSFORME", "LATER", "REMIND", and "WONTFIX"
-
-  for t in _types:
-    found[t] = fixed[t] = invalid[t] = duplicate[t] = other[t] = 0
-
-  for issue in data:
-    # filter out disrespected milestones
-    if issue.milestone in _milestone_filter:
-      continue
-
-    # record the found/fixed counts
-    if d_start <= issue.created <= d_end:
-      found[issue.type] = found[issue.type] + 1
-    if d_start <= issue.resolved <= d_end:
-      if details:
-        if issue.resolution == "FIXED":
-          fixed[issue.type] = fixed[issue.type] + 1
-        elif issue.resolution == "INVALID":
-          invalid[issue.type] = invalid[issue.type] + 1
-        elif issue.resolution == "DUPLICATE":
-          duplicate[issue.type] = duplicate[issue.type] + 1
-        else:
-          other[issue.type] = other[issue.type] + 1
-      else:
-        fixed[issue.type] = fixed[issue.type] + 1
-
-  if details:
-    return found, fixed, invalid, duplicate, other
-  else:
-    return found, fixed
-
-
-def load_data(datafile):
-  "Return a list of Issue objects for the specified data."
-  return list(map(Issue, open(datafile).readlines()))
-
-
-class Issue:
-  "Represents a single issue from the exported IssueZilla data."
-
-  def __init__(self, line):
-    row = line.strip().split('\t')
-
-    self.id = int(row[0])
-    self.type = row[1]
-    self.reporter = row[2]
-    if row[3] == 'NULL':
-      self.assigned = None
-    else:
-      self.assigned = row[3]
-    self.milestone = row[4]
-    self.created = parse_time(row[5])
-    self.resolution = row[7]
-    if not self.resolution:
-      # If the resolution is empty, then force the resolved date to None.
-      # When an issue is reopened, there will still be activity showing
-      # a "RESOLVED", thus we get a resolved date. But we simply want to
-      # ignore that date.
-      self.resolved = None
-    else:
-      self.resolved = parse_time(row[6])
-    self.summary = row[8]
-
-
-parse_time_re = re.compile('([0-9]{4})-([0-9]{2})-([0-9]{2}) '
-                           '([0-9]{2}):([0-9]{2}):([0-9]{2})')
-
-def parse_time(t):
-  "Convert an exported MySQL timestamp into seconds since the epoch."
-
-  global parse_time_re
-
-  if t == 'NULL':
-    return None
-  try:
-    matches = parse_time_re.match(t)
-    return time.mktime((int(matches.group(1)),
-                        int(matches.group(2)),
-                        int(matches.group(3)),
-                        int(matches.group(4)),
-                        int(matches.group(5)),
-                        int(matches.group(6)),
-                        0, 0, -1))
-  except ValueError:
-    sys.stderr.write('ERROR: bad time value: %s\n'% t)
-    sys.exit(1)
-
-def shortusage():
-  print(pydoc.synopsis(sys.argv[0]))
-  print("""
-For simple text summary:
-       find-fix.py [options] query-set-1.tsv YYYY-MM-DD YYYY-MM-DD
-
-For gnuplot presentation:
-       find-fix.py [options] query-set-1.tsv outfile
-""")
-
-def usage():
-  shortusage()
-  for x in long_opts:
-      padding_limit = 18
-      if x[0][-1:] == '=':
-          sys.stdout.write("   --%s " % x[0][:-1])
-          padding_limit = 19
-      else:
-          sys.stdout.write("   --%s " % x[0])
-      print("%s %s" % ((' ' * (padding_limit - len(x[0]))), x[1]))
-  print('''
-Option keywords may be abbreviated to any unique prefix.
-Most options require "=xxx" arguments.
-Option order is not important.''')
-
-if __name__ == '__main__':
-  main()
diff --git a/tools/dev/iz/run-queries.sh b/tools/dev/iz/run-queries.sh
deleted file mode 100755
index 990caf5..0000000
--- a/tools/dev/iz/run-queries.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-if test $# != 3; then
-  echo "USAGE: $0 DATABASE_USER DATABASE_PASSWORD MYSQL_DATABASE"
-  exit 1
-fi
-
-dbuser="$1"
-dbpass="$2"
-dbdb="$3"
-
-q1='select issues.issue_id, issue_type, user1.LOGIN_NAME "reporter",
-       user2.LOGIN_NAME "assigned_to", target_milestone, creation_ts,
-       max(issue_when) "resolved_ts", resolution, short_desc
-  from issues left join issues_activity
-           on issues.issue_id=issues_activity.issue_id and newvalue="RESOLVED",
-       profiles prof1,
-       profiles prof2 left join tigris.HELM_USER user1
-           on user1.USER_ID=prof1.helm_user_id
-         left join tigris.HELM_USER user2
-           on user2.USER_ID=prof2.helm_user_id
-  where prof1.userid=reporter and prof2.userid=assigned_to
-  group by issues.issue_id
-  order by issues.issue_id'
-
-q2='select issues.issue_id, issue_type, user1.LOGIN_NAME "reporter",
-       user2.LOGIN_NAME "assigned_to", target_milestone, creation_ts,
-       max(issue_when) "resolved_ts", resolution, short_desc,
-       priority
-  from issues left join issues_activity
-           on issues.issue_id=issues_activity.issue_id and newvalue="RESOLVED",
-       profiles prof1,
-       profiles prof2 left join tigris.HELM_USER user1
-           on user1.USER_ID=prof1.helm_user_id
-         left join tigris.HELM_USER user2
-           on user2.USER_ID=prof2.helm_user_id
-  where prof1.userid=reporter and prof2.userid=assigned_to
-  group by issues.issue_id
-  order by issues.issue_id'
-
-mysql --batch -e "use $dbdb; $q1" --user=$dbuser --password=$dbpass --silent > iz-data/query-set-1.tsv
-mysql --batch -e "use $dbdb; $q2" --user=$dbuser --password=$dbpass --silent > iz-data/query-set-2.tsv