Index: /ValBot/Bash/drive_https_upgrade.sh
===================================================================
--- /ValBot/Bash/drive_https_upgrade.sh	(revision 1169)
+++ /ValBot/Bash/drive_https_upgrade.sh	(revision 1169)
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+IFS="
+"
+
+CORE="/path/to/Pywikibot/core"
+SUMMARY="changing link from http->https"
+RATE=6
+FIX_START=551
+FIX_END=650
+
+cd "$CORE"
+if [ ! -f "pwb.py" ]; then
+   echo "drive_https_upgrade.sh: Can't launch Pywikibot!"
+   exit
+fi
+
+echo "drive_https_upgrade.sh: Starting at fix $FIX_START..."
+
+FIX_CUR=0
+LAST_RUN=0
+for THE_LINE in `cat "/path/to/ValExtLinks report.txt"`; do
+   if [[ $THE_LINE =~ .*http-\>https.* ]]; then
+      let FIX_CUR+=1
+      if [ $FIX_CUR -lt $FIX_START ]; then
+         continue
+      fi
+      if [ $FIX_CUR -gt $FIX_END ]; then
+         echo "drive_https_upgrade.sh: Stopped after fix $FIX_END."
+         exit
+      fi
+
+      # Wait for rate limit to expire if we have run the Python script before in this session
+      if [ $LAST_RUN -gt 0 ]; then
+         CUR_TIME=$(date +%s)
+         WAIT_REMAINDER=$(($RATE - $CUR_TIME + $LAST_RUN))
+         if [ $WAIT_REMAINDER -gt 0 ]; then
+            echo "drive_https_upgrade.sh: Waiting $WAIT_REMAINDER second(s)."
+            sleep $WAIT_REMAINDER
+         fi
+      fi
+      ON_PAGE=${THE_LINE#*page \'}
+      ON_PAGE=${ON_PAGE%%\'*}
+      FROM_LINK=${THE_LINE#*URL \'}
+      FROM_LINK=${FROM_LINK%%\'*}
+      TO_LINK=${THE_LINE%\'*}
+      TO_LINK=${TO_LINK##*\'}
+      LAST_RUN=$(date +%s)
+      python pwb.py replace -page:"$ON_PAGE" "$FROM_LINK" "$TO_LINK" -summary:"$SUMMARY" -always
+   fi
+done
Index: /ValBot/Bash/drive_slash_adding.sh
===================================================================
--- /ValBot/Bash/drive_slash_adding.sh	(revision 1169)
+++ /ValBot/Bash/drive_slash_adding.sh	(revision 1169)
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+IFS="
+"
+
+CORE="/path/to/Pywikibot/core"
+SUMMARY="added ending slash to URL and/or upgrading http to https to satisfy redirect"
+RATE=6
+FIX_START=0
+FIX_END=0
+
+cd "$CORE"
+if [ ! -f "pwb.py" ]; then
+   echo "drive_slash_adding.sh: Can't launch Pywikibot!"
+   exit
+fi
+
+echo "drive_slash_adding.sh: Starting at fix $FIX_START..."
+
+FIX_CUR=0
+LAST_RUN=0
+for THE_LINE in `cat "/path/to/ValExtLinks report.txt"`; do
+   #echo "drive_slash_adding.sh: Considering '$THE_LINE'..."
+   if [[ "$THE_LINE" =~ .*trailing.* ]] && [[ ! "$THE_LINE" =~ .*w/index.php.* ]]; then
+      #echo "drive_slash_adding.sh: This URL needs to be fixed."
+      let FIX_CUR+=1
+
+      if [ $FIX_CUR -lt $FIX_START ]; then
+         continue
+      fi
+
+      if [ $FIX_END -gt 0 ] && [ $FIX_CUR -gt $FIX_END ]; then
+         echo "drive_slash_adding.sh: Stopped after fix $FIX_END."
+         exit
+      fi      
+
+      # Wait for rate limit to expire if we have run the Python script before in this session
+      if [ $LAST_RUN -gt 0 ]; then
+         CUR_TIME=$(date +%s)
+         WAIT_REMAINDER=$(($RATE - $CUR_TIME + $LAST_RUN))
+         if [ $WAIT_REMAINDER -gt 0 ]; then
+            echo "drive_slash_adding.sh: Waiting $WAIT_REMAINDER second(s)."
+            sleep $WAIT_REMAINDER
+         fi
+      fi
+      ON_PAGE=${THE_LINE#*page \'}
+      ON_PAGE=${ON_PAGE%%\'*}
+      FROM_LINK=${THE_LINE#*URL \'}
+      FROM_LINK=${FROM_LINK%%\'*}
+      TO_LINK=${THE_LINE%\'*}
+      TO_LINK=${TO_LINK##*\'}
+
+      #if [[ "$THE_LINE" =~ ${FROM_LINK}[^a-zA-Z/] ]]; then
+      #   echo "URL is not isolated, skipping."
+      #   continue
+      #fi
+
+      LAST_RUN=$(date +%s)
+      echo "pwb.by replace '-page:\"$ON_PAGE\" \"$FROM_LINK\" \"$TO_LINK\""
+      python pwb.py replace -page:"$ON_PAGE" "$FROM_LINK" "$TO_LINK" -summary:"$SUMMARY"
+   fi
+done
Index: /ValBot/Docs/Generator and replace options.txt
===================================================================
--- /ValBot/Docs/Generator and replace options.txt	(revision 1169)
+++ /ValBot/Docs/Generator and replace options.txt	(revision 1169)
@@ -0,0 +1,503 @@
+
+This bot will make direct text replacements.
+
+It will retrieve information on which pages might need changes either from
+an XML dump or a text file, or only change a single page.
+
+These command line parameters can be used to specify which pages to work on:
+
+GENERATOR OPTIONS
+=================
+
+-cat                Work on all pages which are in a specific category.
+                    Argument can also be given as "-cat:categoryname" or
+                    as "-cat:categoryname|fromtitle" (using # instead of |
+                    is also allowed in this one and the following)
+
+-catr               Like -cat, but also recursively includes pages in
+                    subcategories, sub-subcategories etc. of the
+                    given category.
+                    Argument can also be given as "-catr:categoryname" or
+                    as "-catr:categoryname|fromtitle".
+
+-subcats            Work on all subcategories of a specific category.
+                    Argument can also be given as "-subcats:categoryname" or
+                    as "-subcats:categoryname|fromtitle".
+
+-subcatsr           Like -subcats, but also includes sub-subcategories etc. of
+                    the given category.
+                    Argument can also be given as "-subcatsr:categoryname" or
+                    as "-subcatsr:categoryname|fromtitle".
+
+-uncat              Work on all pages which are not categorised.
+
+-uncatcat           Work on all categories which are not categorised.
+
+-uncatfiles         Work on all files which are not categorised.
+
+-file               Read a list of pages to treat from the named text file.
+                    Page titles in the file may be either enclosed with
+                    [[brackets]], or be separated by new lines.
+                    Argument can also be given as "-file:filename".
+
+-filelinks          Work on all pages that use a certain image/media file.
+                    Argument can also be given as "-filelinks:filename".
+
+-search             Work on all pages that are found in a MediaWiki search
+                    across all namespaces.
+
+-logevents          Work on articles that were on a specified Special:Log.
+                    The value may be a comma separated list of these values:
+
+                        logevent,username,start,end
+
+                    or for backward compatibility:
+
+                        logevent,username,total
+
+                    Note: 'start' is the most recent date and log events are
+                    iterated from present to past. If 'start' is not provided,
+                    it means 'now'; if 'end' is not provided, it means 'since
+                    the beginning'.
+
+                    To use the default value, use an empty string.
+                    You have options for every type of logs given by the
+                    log event parameter which could be one of the following:
+
+                        spamblacklist, titleblacklist, gblblock, renameuser,
+                        globalauth, gblrights, gblrename, abusefilter,
+                        massmessage, thanks, usermerge, block, protect, rights,
+                        delete, upload, move, import, patrol, merge, suppress,
+                        tag, managetags, contentmodel, review, stable,
+                        timedmediahandler, newusers
+
+                    It uses a default limit of 10 pages.
+
+                    Examples:
+
+                    -logevents:move gives pages from move log (usually
+                    redirects)
+                    -logevents:delete,,20 gives 20 pages from deletion log
+                    -logevents:protect,Usr gives pages from protect log by user
+                    Usr
+                    -logevents:patrol,Usr,20 gives 20 patrolled pages by Usr
+                    -logevents:upload,,20121231,20100101 gives upload pages
+                    from the years 2010 through 2012
+                    -logevents:review,,20121231 gives review pages since the
+                    beginning till the 31 Dec 2012
+                    -logevents:review,Usr,20121231 gives review pages by user
+                    Usr since the beginning till the 31 Dec 2012
+
+                    In some cases it must be given as -logevents:"move,Usr,20"
+
+-interwiki          Work on the given page and all equivalent pages in other
+                    languages. This can, for example, be used to fight
+                    multi-site spamming.
+                    Attention: this will cause the bot to modify
+                    pages on several wiki sites, this is not well tested,
+                    so check your edits!
+
+-links              Work on all pages that are linked from a certain page.
+                    Argument can also be given as "-links:linkingpagetitle".
+
+-liverecentchanges  Work on pages from the live recent changes feed. If used as
+                    -liverecentchanges:x, work on x recent changes.
+
+-imagesused         Work on all images that are contained on a certain page.
+                    Can also be given as "-imagesused:linkingpagetitle".
+
+-newimages          Work on the most recent new images. If given as
+                    -newimages:x, will work on x newest images.
+
+-newpages           Work on the most recent new pages. If given as -newpages:x,
+                    will work on x newest pages.
+
+-recentchanges      Work on the pages with the most recent changes. If
+                    given as -recentchanges:x, will work on the x most recently
+                    changed pages. If given as -recentchanges:offset,duration
+                    it will work on pages changed from 'offset' minutes with
+                    'duration' minutes of timespan. rctags are supported too.
+                    The rctag must be the very first parameter part.
+
+                    Examples:
+
+                    -recentchanges:20 gives the 20 most recently changed pages
+                    -recentchanges:120,70 will give pages with 120 offset
+                    minutes and 70 minutes of timespan
+                    -recentchanges:visualeditor,10 gives the 10 most recently
+                    changed pages marked with 'visualeditor'
+                    -recentchanges:"mobile edit,60,35" will retrieve pages
+                    marked with 'mobile edit' for the given offset and timespan
+
+-unconnectedpages   Work on the most recent unconnected pages to the Wikibase
+                    repository. Given as -unconnectedpages:x, will work on the
+                    x most recent unconnected pages.
+
+-ref                Work on all pages that link to a certain page.
+                    Argument can also be given as "-ref:referredpagetitle".
+
+-start              Specifies that the robot should go alphabetically through
+                    all pages on the home wiki, starting at the named page.
+                    Argument can also be given as "-start:pagetitle".
+
+                    You can also include a namespace. For example,
+                    "-start:Template:!" will make the bot work on all pages
+                    in the template namespace.
+
+                    default value is start:!
+
+-prefixindex        Work on pages commencing with a common prefix.
+
+-transcludes        Work on all pages that use a certain template.
+                    Argument can also be given as "-transcludes:Title".
+
+-unusedfiles        Work on all description pages of images/media files that
+                    are not used anywhere.
+                    Argument can be given as "-unusedfiles:n" where
+                    n is the maximum number of articles to work on.
+
+-lonelypages        Work on all articles that are not linked from any other
+                    article.
+                    Argument can be given as "-lonelypages:n" where
+                    n is the maximum number of articles to work on.
+
+-unwatched          Work on all articles that are not watched by anyone.
+                    Argument can be given as "-unwatched:n" where
+                    n is the maximum number of articles to work on.
+
+-property:name      Work on all pages with a given property name from
+                    Special:PagesWithProp.
+
+-usercontribs       Work on all articles that were edited by a certain user.
+                    (Example : -usercontribs:DumZiBoT)
+
+-weblink            Work on all articles that contain an external link to
+                    a given URL; may be given as "-weblink:url"
+
+-withoutinterwiki   Work on all pages that don't have interlanguage links.
+                    Argument can be given as "-withoutinterwiki:n" where
+                    n is the total to fetch.
+
+-mysqlquery         Takes a MySQL query string like
+                    "SELECT page_namespace, page_title FROM page
+                    WHERE page_namespace = 0" and treats
+                    the resulting pages. See
+                    https://www.mediawiki.org/wiki/Manual:Pywikibot/MySQL
+                    for more details.
+
+-sparql             Takes a SPARQL SELECT query string including ?item
+                    and works on the resulting pages.
+
+-sparqlendpoint     Specify SPARQL endpoint URL (optional).
+                    (Example : -sparqlendpoint:http://myserver.com/sparql)
+
+-searchitem         Takes a search string and works on Wikibase pages that
+                    contain it.
+                    Argument can be given as "-searchitem:text", where text
+                    is the string to look for, or "-searchitem:lang:text",
+                    where lang is the language to search items in.
+
+-wantedpages        Work on pages that are linked, but do not exist;
+                    may be given as "-wantedpages:n" where n is the maximum
+                    number of articles to work on.
+
+-wantedcategories   Work on categories that are used, but do not exist;
+                    may be given as "-wantedcategories:n" where n is the
+                    maximum number of categories to work on.
+
+-wantedfiles        Work on files that are used, but do not exist;
+                    may be given as "-wantedfiles:n" where n is the maximum
+                    number of files to work on.
+
+-wantedtemplates    Work on templates that are used, but do not exist;
+                    may be given as "-wantedtemplates:n" where n is the
+                    maximum number of templates to work on.
+
+-random             Work on random pages returned by [[Special:Random]].
+                    Can also be given as "-random:n" where n is the number
+                    of pages to be returned.
+
+-randomredirect     Work on random redirect pages returned by
+                    [[Special:RandomRedirect]]. Can also be given as
+                    "-randomredirect:n" where n is the number of pages to be
+                    returned.
+
+-google             Work on all pages that are found in a Google search.
+                    You need a Google Web API license key. Note that Google
+                    doesn't give out license keys anymore. See google_key in
+                    config.py for instructions.
+                    Argument can also be given as "-google:searchstring".
+
+-page               Work on a single page. Argument can also be given as
+                    "-page:pagetitle", and supplied multiple times for
+                    multiple pages.
+
+-pageid             Work on a single pageid. Argument can also be given as
+                    "-pageid:pageid1,pageid2,." or
+                    "-pageid:'pageid1|pageid2|..'"
+                    and supplied multiple times for multiple pages.
+
+-linter             Work on pages that contain lint errors. Extension Linter
+                    must be available on the site.
+                    -linter select all categories.
+                    -linter:high, -linter:medium or -linter:low select all
+                    categories for that prio.
+                    Single categories can be selected with commas as in
+                    -linter:cat1,cat2,cat3
+
+                    Adding '/int' identifies Lint ID to start querying from:
+                    e.g. -linter:high/10000
+
+                    -linter:show just shows available categories.
+
+-querypage:name     Work on pages provided by a QueryPage-based special page,
+                    see https://www.mediawiki.org/wiki/API:Querypage.
+                    (tip: use -limit:n to fetch only n pages).
+
+                    -querypage shows special pages available.
+
+
+FILTER OPTIONS
+==============
+
+-catfilter          Filter the page generator to only yield pages in the
+                    specified category. See -cat generator for argument format.
+
+-grep               A regular expression that needs to match the article
+                    otherwise the page won't be returned.
+                    Multiple -grep:regexpr can be provided and the page will
+                    be returned if content is matched by any of the regexpr
+                    provided.
+                    Case insensitive regular expressions will be used and
+                    dot matches any character, including a newline.
+
+-grepnot            Like -grep, but return the page only if the regular
+                    expression does not match.
+
+-intersect          Work on the intersection of all the provided generators.
+
+-limit              When used with any other argument -limit:n specifies a set
+                    of pages, work on no more than n pages in total.
+
+-namespaces         Filter the page generator to only yield pages in the
+-namespace          specified namespaces. Separate multiple namespace
+-ns                 numbers or names with commas.
+
+                    Examples:
+
+                    -ns:0,2,4
+                    -ns:Help,MediaWiki
+
+                    You may use a leading "not" to exclude the namespace.
+
+                    Examples:
+
+                    -ns:not:2,3
+                    -ns:not:Help,File
+
+                    If used with -newpages/-random/-randomredirect/linter
+                    generators, -namespace/ns must be provided before
+                    -newpages/-random/-randomredirect/linter.
+                    If used with -recentchanges generator, efficiency is
+                    improved if -namespace is provided before -recentchanges.
+
+                    If used with -start generator, -namespace/ns shall contain
+                    only one value.
+
+-onlyif             A claim the page needs to contain, otherwise the item won't
+                    be returned.
+                    The format is property=value,qualifier=value. Multiple (or
+                    none) qualifiers can be passed, separated by commas.
+
+                    Examples:
+
+                    P1=Q2 (property P1 must contain value Q2),
+                    P3=Q4,P5=Q6,P6=Q7 (property P3 with value Q4 and
+                    qualifiers: P5 with value Q6 and P6 with value Q7).
+                    Value can be page ID, coordinate in format:
+                    latitude,longitude[,precision] (all values are in decimal
+                    degrees), year, or plain string.
+                    The argument can be provided multiple times and the item
+                    page will be returned only if all claims are present.
+                    Argument can be also given as "-onlyif:expression".
+
+-onlyifnot          A claim the page must not contain, otherwise the item won't
+                    be returned.
+                    For usage and examples, see -onlyif above.
+
+-ql                 Filter pages based on page quality.
+                    This is only applicable if contentmodel equals
+                    'proofread-page', otherwise has no effects.
+                    Valid values are in range 0-4.
+                    Multiple values can be comma-separated.
+
+-subpage            -subpage:n filters pages to only those that have depth n
+                    i.e. a depth of 0 filters out all pages that are subpages,
+                    and a depth of 1 filters out all pages that are subpages of
+                    subpages.
+
+
+-titleregex         A regular expression that needs to match the article title
+                    otherwise the page won't be returned.
+                    Multiple -titleregex:regexpr can be provided and the page
+                    will be returned if title is matched by any of the regexpr
+                    provided.
+                    Case insensitive regular expressions will be used and
+                    dot matches any character.
+
+-titleregexnot      Like -titleregex, but return the page only if the regular
+                    expression does not match.
+
+Furthermore, the following command line parameters are supported:
+
+-mysqlquery       Retrieve information from a local database mirror.
+                  If no query specified, bot searches for pages with
+                  given replacements.
+
+-xml              Retrieve information from a local XML dump
+                  (pages-articles or pages-meta-current, see
+                  https://dumps.wikimedia.org). Argument can also
+                  be given as "-xml:filename".
+
+-regex            Make replacements using regular expressions. If this argument
+                  isn't given, the bot will make simple text replacements.
+
+-nocase           Use case insensitive regular expressions.
+
+-dotall           Make the dot match any character at all, including a newline.
+                  Without this flag, '.' will match anything except a newline.
+
+-multiline        '^' and '$' will now match begin and end of each line.
+
+-xmlstart         (Only works with -xml) Skip all articles in the XML dump
+                  before the one specified (may also be given as
+                  -xmlstart:Article).
+
+-addcat:cat_name  Adds "cat_name" category to every altered page.
+
+-excepttitle:XYZ  Skip pages with titles that contain XYZ. If the -regex
+                  argument is given, XYZ will be regarded as a regular
+                  expression.
+
+-requiretitle:XYZ Only do pages with titles that contain XYZ. If the -regex
+                  argument is given, XYZ will be regarded as a regular
+                  expression.
+
+-excepttext:XYZ   Skip pages which contain the text XYZ. If the -regex
+                  argument is given, XYZ will be regarded as a regular
+                  expression.
+
+-exceptinside:XYZ Skip occurrences of the to-be-replaced text which lie
+                  within XYZ. If the -regex argument is given, XYZ will be
+                  regarded as a regular expression.
+
+-exceptinsidetag:XYZ Skip occurrences of the to-be-replaced text which lie
+                  within an XYZ tag.
+
+-summary:XYZ      Set the summary message text for the edit to XYZ, bypassing
+                  the predefined message texts with original and replacements
+                  inserted. Can't be used with -automaticsummary.
+
+-automaticsummary Uses an automatic summary for all replacements which don't
+                  have a summary defined. Can't be used with -summary.
+
+-sleep:123        If you use -fix you can check multiple regex at the same time
+                  in every page. This can lead to a great waste of CPU because
+                  the bot will check every regex without waiting using all the
+                  resources. This will slow it down between a regex and another
+                  in order not to waste too much CPU.
+
+-fix:XYZ          Perform one of the predefined replacements tasks, which are
+                  given in the dictionary 'fixes' defined inside the files
+                  fixes.py and user-fixes.py.
+
+                  Currently available predefined fixes are:
+
+                  * HTML        - Convert HTML tags to wiki syntax, and
+                                  fix XHTML.
+                  * isbn        - Fix badly formatted ISBNs.
+                  * syntax      - Try to fix bad wiki markup. Do not run
+                                  this in automatic mode, as the bot may
+                                  make mistakes.
+                  * syntax-safe - Like syntax, but less risky, so you can
+                                  run this in automatic mode.
+                  * case-de     - fix upper/lower case errors in German
+                  * grammar-de  - fix grammar and typography in German
+                  * vonbis      - Ersetze Binde-/Gedankenstrich durch "bis"
+                                  in German
+                  * music       - Links auf Begriffsklärungen in German
+                  * datum       - specific date formats in German
+                  * correct-ar  - Typo corrections for Arabic Wikipedia and any
+                                  Arabic wiki.
+                  * yu-tld      - Fix links to .yu domains because it is
+                                  disabled, see:
+                                  https://lists.wikimedia.org/pipermail/wikibots-l/2009-February/000290.html
+                  * fckeditor   - Try to convert FCKeditor HTML tags to wiki
+                                  syntax.
+
+-manualinput      Request manual replacements via the command line input even
+                  if replacements are already defined. If this option is set
+                  (or no replacements are defined via -fix or the arguments)
+                  it'll ask for additional replacements at start.
+
+-pairsfile        Lines from the given file name(s) will be read as replacement
+                  arguments. i.e. a file containing lines "a" and "b", used as:
+
+                      python pwb.py replace -page:X -pairsfile:file c d
+
+                  will replace 'a' with 'b' and 'c' with 'd'.
+
+-always           Don't prompt you for each replacement
+
+-recursive        Recurse replacement as long as possible. Be careful, this
+                  might lead to an infinite loop.
+
+-allowoverlap     When occurrences of the pattern overlap, replace all of them.
+                  Be careful, this might lead to an infinite loop.
+
+-fullsummary      Use one large summary for all command line replacements.
+
+other:            First argument is the old text, second argument is the new
+                  text. If the -regex argument is given, the first argument
+                  will be regarded as a regular expression, and the second
+                  argument might contain expressions like \1 or \g<name>.
+                  It is possible to introduce more than one pair of old text
+                  and replacement.
+
+Examples
+--------
+
+If you want to change templates from the old syntax, e.g. {{msg:Stub}}, to the
+new syntax, e.g. {{Stub}}, download an XML dump file (pages-articles) from
+https://dumps.wikimedia.org, then use this command:
+
+    python pwb.py replace -xml -regex "{{msg:(.*?)}}" "{{\1}}"
+
+If you have a dump called foobar.xml and want to fix typos in articles, e.g.
+Errror -> Error, use this:
+
+    python pwb.py replace -xml:foobar.xml "Errror" "Error" -namespace:0
+
+If you want to do more than one replacement at a time, use this:
+
+    python pwb.py replace -xml:foobar.xml "Errror" "Error" "Faail" "Fail" \
+        -namespace:0
+
+If you have a page called 'John Doe' and want to fix the format of ISBNs, use:
+
+    python pwb.py replace -page:John_Doe -fix:isbn
+
+This command will change 'referer' to 'referrer', but not in pages which
+talk about HTTP, where the typo has become part of the standard:
+
+    python pwb.py replace referer referrer -file:typos.txt -excepttext:HTTP
+
+Please type "python pwb.py replace -help | more" if you can't read
+the top of the help.
+
+
+GLOBAL OPTIONS
+==============
+For global options use -help:global or run pwb.py -help
+
+
Index: /ValBot/Python/check_interwiki_links.py
===================================================================
--- /ValBot/Python/check_interwiki_links.py	(revision 1169)
+++ /ValBot/Python/check_interwiki_links.py	(revision 1169)
@@ -0,0 +1,135 @@
+import os
+
+from urllib.parse import urljoin
+
+import pywikibot
+import re
+
+from pywikibot.bot import QuitKeyboardInterrupt
+from pywikibot import pagegenerators
+from pywikibot.tools.formatter import color_format
+from pywikibot.comms.http import fetch
+from pywikibot.specialbots import UploadRobot
+from bs4 import BeautifulSoup
+
+# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
+interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
+
+interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
+
+pages_checked = 0
+iw_found = 0
+problems_found = 0
+
+# Searches the given page text for interwiki links
+def scan_for_iw_links(page_text):
+    global pages_checked
+    global iw_found
+    global problems_found
+    pages_checked = pages_checked + 1
+    cur = 0
+
+    for prefix in interwiki_prefixes:
+        # Isolate strings that start with "[[prefix:" and end with "|" or "]"
+        iw_link = "\[\[" + prefix + ":[^|\]]*(\||\])"
+        for match in re.finditer(iw_link, page_text):
+            # Extract just the page title from this regex match
+            s = match.start() + 2 + len(prefix) + 1
+            e = match.end() - 1
+
+            # Sometimes we used a space char. instead of a '_', so fix that before querying
+            page_title = page_text[s:e].replace(' ', '_')
+
+            # Construct full URL for the particular wiki
+            iw_url = interwiki_urls[cur] + page_title
+            pywikibot.output('Found {0} link {1}.'.format(prefix, page_title))
+            iw_found = iw_found + 1
+
+            # Adjust URL if this is a foreign-language WP link
+            if re.match("^[a-zA-Z]{2}:", page_title):
+                lang_code = page_title[0:2] + "."
+                # "wp:" is the Wikipedia: namespace, not a language
+                if lang_code != "wp." and lang_code != "WP.":
+                    iw_url = iw_url.replace('en.', lang_code)
+                    iw_url = iw_url.replace(page_title[0:3], '')
+
+            # Test the URL
+            #pywikibot.output('Testing URL {}...'.format(iw_url))
+            response = fetch(iw_url)
+
+            # Redirects are followed automatically by fetch() and treated as "200"s, so the
+            # way we tell that a redirect occurred is by checking the history
+            if response.history != []:
+                pywikibot.output('WARNING: Redirected from {}.'.format(response.history))
+                problems_found = problems_found + 1
+            elif response.status_code != 200:
+                #pywikibot.output('WARNING: Got response code {}.'.format(response.status_code)) # commented out because fetch() already prints such a msg
+                problems_found = problems_found + 1
+            elif '#' in page_title:
+                # Isolate section link
+                pywikibot.output('Detected section link on page {0}.'.format(page_title))
+                page_name, anchor_name = page_title.split('#')
+                
+                # Convert dot-notation hex entities to proper characters
+                anchor_name = anchor_name.replace('.22', '"')
+                anchor_name = anchor_name.replace('.27', '\'')
+                anchor_name = anchor_name.replace('.28', '(')
+                anchor_name = anchor_name.replace('.29', ')')
+                
+                # Read linked page to see if it really has this anchor link
+                soup = BeautifulSoup(response.text, 'html.parser')
+                found_section = False
+                for tag in soup.findAll('a'):
+                    link = tag.get('href', None)
+                    if not link:
+                        #pywikibot.output('It is not a link.')
+                        continue
+                    #pywikibot.output('Got link {0}.'.format(link))
+                    if not link.startswith('#'):
+                        continue
+                        
+                    if link == '#' + anchor_name:
+                        pywikibot.output('Found section link!')
+                        found_section = True
+                        break
+                if found_section == False:
+                    pywikibot.output('Could not find section {0} on page {1}.'.format(anchor_name, page_name))
+                    problems_found = problems_found + 1
+        cur = cur + 1
+
+def main(*args):
+    cat_name = ''
+    page_name = ''
+
+    local_args = pywikibot.handle_args(args)
+    genFactory = pagegenerators.GeneratorFactory()
+
+    for arg in local_args:
+        if arg.startswith('-cat:'):
+            cat_name = arg[5:]
+        elif arg.startswith('-page:'):
+            page_name = arg[6:]
+
+    site = pywikibot.Site()
+
+    # This line of code enumerates the methods in the 'page' class
+    #pywikibot.stdout(format(dir(page)))
+
+    if cat_name != '':
+        cat_obj = pywikibot.Category(site, cat_name)
+        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
+        for page in pagegenerators.PreloadingGenerator(generator, 100):
+            pywikibot.stdout('Checking page {0}'.format(page.title()))
+            scan_for_iw_links(page.text)
+    elif page_name != '':
+        page = pywikibot.Page(site, page_name)
+        pywikibot.stdout('Checking page {0}'.format(page.title()))
+        scan_for_iw_links(page.text)
+
+    global pages_checked
+    global iw_found
+    global problems_found
+    pywikibot.stdout('Checked {0} page(s) and found {1} interwiki link(s) with {2} problem(s).'.format(pages_checked, iw_found, problems_found))
+
+if __name__ == '__main__':
+    main()
Index: /ValBot/Python/check_intrawiki_section_links.py
===================================================================
--- /ValBot/Python/check_intrawiki_section_links.py	(revision 1169)
+++ /ValBot/Python/check_intrawiki_section_links.py	(revision 1169)
@@ -0,0 +1,200 @@
+import os
+
+from urllib.parse import urljoin
+
+import pywikibot
+import re
+
+from pywikibot.bot import QuitKeyboardInterrupt
+from pywikibot import pagegenerators
+from pywikibot.tools.formatter import color_format
+from pywikibot.comms.http import fetch
+from pywikibot.specialbots import UploadRobot
+from bs4 import BeautifulSoup
+
+# Array of OniGalore's namespaces
+intrawiki_prefixes = ('Image', 'Special', 'Talk', 'User', 'User_talk', 'OniGalore', 'OniGalore_talk', 'File', 'File_talk', 'MediaWiki', 'MediaWiki_talk', 'Template', 'Template_talk', 'Help', 'Help_talk', 'Category', 'Category_talk', 'BSL', 'BSL_talk', 'OBD', 'OBD_talk', 'AE', 'AE_talk', 'Oni2', 'Oni2_talk', 'XML', 'XML_talk')
+
+# URL for main namespace of our wiki
+onigalore_url = 'https://wiki.oni2.net/'
+
+# Interwiki prefixes, for ruling out these links
+interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
+
+pages_checked = 0
+iw_found = 0
+problems_found = 0
+page_name = ''
+
+# Searches the given page text for intrawiki links with section links in them
+def scan_for_iw_links(page_text):
+    global pages_checked
+    global iw_found
+    global problems_found
+    global page_name
+    pages_checked = pages_checked + 1
+
+    # Isolate strings of pattern "[[anything]]", "[[any:thing]]", "[[any|thing]]" or
+    # "[[any:thi|ng]]"
+    iw_link = "\[\[[^|\]]*(\||\])"
+    for match in re.finditer(iw_link, page_text):
+        found_iw_match = False
+        iw_url = ""
+        page_name2 = page_name
+    
+        # Cut out the matched text from the page, and in the process remove the "[[" from the
+        # front and the "|" or "]" from the end
+        s = match.start() + 2
+        e = match.end() - 1
+        link_text = page_text[s:e]
+
+        # Sometimes we used a space char. instead of a '_', so fix that before querying
+        link_text = link_text.replace(' ', '_')
+        #pywikibot.output('Found link {0}.'.format(link_text))
+        
+        # If this link doesn't have a section link in it, then we don't care about it, as
+        # MediaWiki takes care of checking basic intrawiki links
+        if not '#' in link_text:
+            #pywikibot.output('Link doesn\'t have a section anchor in it. Skipping.')
+            continue
+        
+        # If there is a '{' in the link, then probably it's a link built on transcluded text
+        # like "Quotes/Diary#{{C3}}", which we cannot expand and work with, so skip it
+        if '{' in link_text:
+            pywikibot.output('ADVICE: Link {} seems to use transclusion, so it can\'t be verified automatically. You should check it manually.'.format(link_text))
+            continue
+        
+        # If this is a relative "../" link, find the parent page and set ourselves to that
+        # page, then remove the relative portion of the link. Note that this is only performed
+        # once, so if there's multiple steps back ("../../"), we're out of luck.
+        if link_text.startswith('../'):
+            last_slash = page_name.rfind('/')
+            page_name2 = page_name[0:last_slash]
+            #pywikibot.output('Changed page_name to {} on account of "../".'.format(page_name2))
+            link_text = link_text[3:len(link_text)]
+            #pywikibot.output('Changed link_text to {} on account of "../".'.format(link_text))
+            # If this is now going to be a bare section link for the parent page, don't add
+            # a slash, otherwise do because we are drilling down to another subpage
+            if link_text.startswith('#'):
+                link_text = page_name2 + link_text
+            else:
+                link_text = page_name2 + '/' + link_text
+            
+        # If this is a bare section link, build URL based on this page
+        if link_text.startswith('#'):
+            iw_url = onigalore_url + page_name2
+            iw_found = iw_found + 1
+            #pywikibot.output('Found link to this very page, {}.'.format(link_text))
+            found_iw_match = True
+            link_text = page_name2 + link_text
+        
+        # If there's no ":" in the link (before the section link, where a colon would just be
+        # part of the text) then it's a Main namespace article, so construct URL
+        #if not ':' in link_text:
+        if found_iw_match == False:
+            if not re.search(":.*#", link_text):
+                iw_url = onigalore_url + link_text
+                iw_found = iw_found + 1
+                #pywikibot.output('Found link to OniGalore Main namespace page {}.'.format(link_text))
+                found_iw_match = True
+            
+        # If there is a ":", match the prefix against the intrawiki prefixes on OniGalore
+        if found_iw_match == False:
+            for prefix in intrawiki_prefixes:
+                #pywikibot.output('Comparing link against prefix {}.'.format(prefix))
+                if prefix + ":" in link_text:
+                    iw_url = onigalore_url + link_text
+                    _, post_ns = link_text.split(':', 1)
+                    #pywikibot.output('Found link to OniGalore {0} namespace page {1}.'.format(prefix, post_ns))
+                    iw_found = iw_found + 1
+                    found_iw_match = True
+                    break
+        
+        # If we didn't match the prefix against any intrawiki prefixes, see if it matches
+        # against an interwiki prefix; if so, this link can be ignored
+        is_interwiki = False
+        if found_iw_match == False:
+            for prefix in interwiki_prefixes:
+                if prefix + ":" in link_text:
+                    #pywikibot.output('Skipping link {} because it is an interwiki link.'.format(link_text))
+                    is_interwiki = True
+                    break
+        if is_interwiki:
+            continue
+        
+        # If we still haven't turned this match into a URL, something's gone wrong
+        if (found_iw_match == False) or (iw_url == ""):
+            pywikibot.output('ERROR: Couldn\'t figure out link {}. Aborting script.'.format(link_text))
+            quit()
+
+        # Test the URL
+        iw_url = iw_url.replace(' ', '_')
+        #pywikibot.output('Reading page at {}...'.format(iw_url))
+        response = fetch(iw_url)
+
+        # Redirects are followed automatically by fetch() and treated as "200"s, so the
+        # way we tell that a redirect occurred is by checking the history
+        if response.history != []:
+            pywikibot.output('WARNING: Redirected from {}.'.format(response.history))
+            problems_found = problems_found + 1
+        elif response.status_code != 200:
+            #pywikibot.output('WARNING: Got response code {}.'.format(response.status_code)) # commented out because fetch() already prints such a msg
+            problems_found = problems_found + 1
+        else:
+            # Isolate section link
+            pre_section, section_name = link_text.split('#', 1)
+            #pywikibot.output('Searching for section link {} on page.'.format(section_name))
+            
+            # Convert slash character to the dot-notation hex encoding that MediaWiki uses
+            section_name = section_name.replace('/', '.2F')
+            
+            # Read linked page to see if it really has this anchor link
+            soup = BeautifulSoup(response.text, 'html.parser')
+            found_section = False
+            for span_tag in soup.findAll('span'):
+                span_name = span_tag.get('id', None)
+                if span_name == section_name:
+                    #pywikibot.output('Found section!')
+                    found_section = True
+                    break
+            if found_section == False:
+                pywikibot.output('ERROR: Could not find section {0} on page {1}!'.format(section_name, pre_section))
+                problems_found = problems_found + 1
+
+def main(*args):
+    cat_name = ''
+    global page_name
+
+    local_args = pywikibot.handle_args(args)
+    genFactory = pagegenerators.GeneratorFactory()
+
+    for arg in local_args:
+        if arg.startswith('-cat:'):
+            cat_name = arg[5:]
+        elif arg.startswith('-page:'):
+            page_name = arg[6:]
+
+    site = pywikibot.Site()
+
+    # This line of code enumerates the methods in the 'page' class
+    #pywikibot.stdout(format(dir(page)))
+
+    if cat_name != '':
+        cat_obj = pywikibot.Category(site, cat_name)
+        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
+        for page in pagegenerators.PreloadingGenerator(generator, 100):
+            pywikibot.stdout('Checking page {0}'.format(page.title()))
+            page_name = page.title()
+            scan_for_iw_links(page.text)
+    elif page_name != '':
+        page = pywikibot.Page(site, page_name)
+        pywikibot.stdout('Checking page {0}'.format(page.title()))
+        scan_for_iw_links(page.text)
+
+    global pages_checked
+    global iw_found
+    global problems_found
+    pywikibot.stdout('Checked {0} page(s) and found {1} intrawiki link(s) with {2} section link problem(s).'.format(pages_checked, iw_found, problems_found))
+
+if __name__ == '__main__':
+    main()
Index: /ValBot/Python/find_external_images.py
===================================================================
--- /ValBot/Python/find_external_images.py	(revision 1169)
+++ /ValBot/Python/find_external_images.py	(revision 1169)
@@ -0,0 +1,89 @@
+import os
+
+from urllib.parse import urljoin
+
+import pywikibot
+
+from pywikibot.bot import QuitKeyboardInterrupt
+from pywikibot import pagegenerators
+from pywikibot.comms.http import fetch
+from pywikibot.specialbots import UploadRobot
+from bs4 import BeautifulSoup
+
+first_run = False
+pages_checked = 0
+oni2_images = 0
+file_formats = ('.jpg', '.jpeg', '.png', '.gif', '.svg')
+
+# Scrapes the HTML at the given URL for image tags
+def get_image_links(url, shown):
+    links = []
+    global oni2_images
+    global pages_checked
+
+    response = fetch(url)
+    if response.status_code != 200:
+        pywikibot.output('Skipping url: {}'.format(url))
+        return links
+
+    soup = BeautifulSoup(response.text, 'html.parser')
+    pages_checked = pages_checked + 1
+    if not shown:
+        tagname = 'a'
+    elif shown == 'just':
+        tagname = 'img'
+    else:
+        tagname = ['a', 'img']
+    #pywikibot.output('Looking at tags.')
+    for tag in soup.findAll(tagname):
+        link = tag.get('src', tag.get('href', None))
+        if not link:
+            #pywikibot.output('It is not a link.')
+            continue
+        #pywikibot.output('Got link {0}.'.format(link))
+        _, ext = os.path.splitext(link)
+        if ext.lower() in file_formats:
+            pywikibot.output('Found image link {0}.'.format(ext))
+            if "oni2.net" in link:
+                pywikibot.stdout('Found an oni2.net image: {0}'.format(link))
+                oni2_images = oni2_images + 1
+    return links
+
+
+def main(*args):
+    cat = ''
+    url = ''
+    image_url = False
+    shown = False
+    desc = []
+
+    local_args = pywikibot.handle_args(args)
+    genFactory = pagegenerators.GeneratorFactory()
+
+    for arg in local_args:
+        if arg.startswith('-cat:'):
+            cat = arg[5:]
+        elif arg == '-shown':
+            shown = True
+        elif arg == '-justshown':
+            shown = 'just'
+        elif url == '':
+            url = arg
+        else:
+            desc += [arg]
+    desc = ' '.join(desc)
+
+    site = pywikibot.Site()
+    cat_obj = pywikibot.Category(site, cat)
+    generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
+    for page in pagegenerators.PreloadingGenerator(generator, 100):
+        pywikibot.stdout('Checking page {0}'.format(page.title()))
+        page_url = page.full_url().replace("%2F", "/")
+        get_image_links(page_url, shown)
+
+    global pages_checked
+    global oni2_images
+    pywikibot.stdout('Checked {0} page(s) and found {1} image(s) from oni2.net.'.format(pages_checked, oni2_images))
+
+if __name__ == '__main__':
+    main()
Index: /ValBot/Python/uncapitalize_image_suffixes.py
===================================================================
--- /ValBot/Python/uncapitalize_image_suffixes.py	(revision 1169)
+++ /ValBot/Python/uncapitalize_image_suffixes.py	(revision 1169)
@@ -0,0 +1,57 @@
+import os
+import pywikibot
+
+from pywikibot.bot import QuitKeyboardInterrupt
+from pywikibot import pagegenerators
+
+def main(*args):
+    genFactory = pagegenerators.GeneratorFactory()
+    #allowed_suffixes = ('jpg', 'jpeg', 'png', 'gif', 'svg')
+    allowed_suffixes = []
+    images_checked = 0
+    capped_suffixes = 0
+    site = pywikibot.Site()
+
+    try:
+        allowed_suffixes = site.siteinfo.get('fileextensions', get_default=False)
+    except KeyError:
+        pywikibot.stdout('Failed to get the wiki\'s allowed image suffixes!')
+        return
+    else:
+        allowed_suffixes = [item['ext'].lower() for item in allowed_suffixes]
+        pywikibot.stdout('Wiki accepts image suffixes {0}.'.format(allowed_suffixes))
+
+    generator = site.allimages()
+    pywikibot.stdout('Looking for images with capitalized suffixes...')
+    for page in pagegenerators.PreloadingGenerator(generator, 100):
+        images_checked = images_checked + 1
+        suffix = page.title().split('.')[-1]
+        if suffix.lower() in allowed_suffixes:
+            if suffix != suffix.lower():
+                # We found a page which has an allowed suffix but which is capitalized
+                pywikibot.stdout('{0}'.format(page.title()))
+                capped_suffixes = capped_suffixes + 1
+
+                # Rename page to have lowercase suffix
+                new_page_title = os.path.splitext(page.title())[0] + '.' + suffix.lower()
+                pywikibot.stdout('Moving page to {0}...'.format(new_page_title))
+                page.move(new_page_title, reason='use lowercase file suffix', movetalk=True, noredirect=True)
+
+                # Warn the user if the page we moved had references to it
+                wiki_links = []
+                wiki_links = list(page.backlinks())
+                file_links = []
+                file_links = list(page.usingPages())
+                if len(wiki_links) or len(file_links):
+                    pywikibot.stdout('However this page is referenced by:')
+                    for ref in wiki_links:
+                        pywikibot.stdout('    {0}'.format(ref.title()))
+                    for ref in file_links:
+                        pywikibot.stdout('    {0}'.format(ref.title()))
+        else:
+            pywikibot.stdout('Found disallowed suffix {0}!'.format(suffix)) # should never happen
+
+    pywikibot.stdout('Checked {0} images and found {1} with capitalized suffixes.'.format(images_checked, capped_suffixes))
+
+if __name__ == '__main__':
+    main()
Index: /ValBot/check_interwiki_links.py
===================================================================
--- /ValBot/check_interwiki_links.py	(revision 1168)
+++ 	(revision )
@@ -1,135 +1,0 @@
-import os
-
-from urllib.parse import urljoin
-
-import pywikibot
-import re
-
-from pywikibot.bot import QuitKeyboardInterrupt
-from pywikibot import pagegenerators
-from pywikibot.tools.formatter import color_format
-from pywikibot.comms.http import fetch
-from pywikibot.specialbots import UploadRobot
-from bs4 import BeautifulSoup
-
-# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
-interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
-
-interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
-
-pages_checked = 0
-iw_found = 0
-problems_found = 0
-
-# Searches the given page text for interwiki links
-def scan_for_iw_links(page_text):
-    global pages_checked
-    global iw_found
-    global problems_found
-    pages_checked = pages_checked + 1
-    cur = 0
-
-    for prefix in interwiki_prefixes:
-        # Isolate strings that start with "[[prefix:" and end with "|" or "]"
-        iw_link = "\[\[" + prefix + ":[^|\]]*(\||\])"
-        for match in re.finditer(iw_link, page_text):
-            # Extract just the page title from this regex match
-            s = match.start() + 2 + len(prefix) + 1
-            e = match.end() - 1
-
-            # Sometimes we used a space char. instead of a '_', so fix that before querying
-            page_title = page_text[s:e].replace(' ', '_')
-
-            # Construct full URL for the particular wiki
-            iw_url = interwiki_urls[cur] + page_title
-            pywikibot.output('Found {0} link {1}.'.format(prefix, page_title))
-            iw_found = iw_found + 1
-
-            # Adjust URL if this is a foreign-language WP link
-            if re.match("^[a-zA-Z]{2}:", page_title):
-                lang_code = page_title[0:2] + "."
-                # "wp:" is the Wikipedia: namespace, not a language
-                if lang_code != "wp." and lang_code != "WP.":
-                    iw_url = iw_url.replace('en.', lang_code)
-                    iw_url = iw_url.replace(page_title[0:3], '')
-
-            # Test the URL
-            #pywikibot.output('Testing URL {}...'.format(iw_url))
-            response = fetch(iw_url)
-
-            # Redirects are followed automatically by fetch() and treated as "200"s, so the
-            # way we tell that a redirect occurred is by checking the history
-            if response.history != []:
-                pywikibot.output('WARNING: Redirected from {}.'.format(response.history))
-                problems_found = problems_found + 1
-            elif response.status_code != 200:
-                #pywikibot.output('WARNING: Got response code {}.'.format(response.status_code)) # commented out because fetch() already prints such a msg
-                problems_found = problems_found + 1
-            elif '#' in page_title:
-                # Isolate section link
-                pywikibot.output('Detected section link on page {0}.'.format(page_title))
-                page_name, anchor_name = page_title.split('#')
-                
-                # Convert dot-notation hex entities to proper characters
-                anchor_name = anchor_name.replace('.22', '"')
-                anchor_name = anchor_name.replace('.27', '\'')
-                anchor_name = anchor_name.replace('.28', '(')
-                anchor_name = anchor_name.replace('.29', ')')
-                
-                # Read linked page to see if it really has this anchor link
-                soup = BeautifulSoup(response.text, 'html.parser')
-                found_section = False
-                for tag in soup.findAll('a'):
-                    link = tag.get('href', None)
-                    if not link:
-                        #pywikibot.output('It is not a link.')
-                        continue
-                    #pywikibot.output('Got link {0}.'.format(link))
-                    if not link.startswith('#'):
-                        continue
-                        
-                    if link == '#' + anchor_name:
-                        pywikibot.output('Found section link!')
-                        found_section = True
-                        break
-                if found_section == False:
-                    pywikibot.output('Could not find section {0} on page {1}.'.format(anchor_name, page_name))
-                    problems_found = problems_found + 1
-        cur = cur + 1
-
-def main(*args):
-    cat_name = ''
-    page_name = ''
-
-    local_args = pywikibot.handle_args(args)
-    genFactory = pagegenerators.GeneratorFactory()
-
-    for arg in local_args:
-        if arg.startswith('-cat:'):
-            cat_name = arg[5:]
-        elif arg.startswith('-page:'):
-            page_name = arg[6:]
-
-    site = pywikibot.Site()
-
-    # This line of code enumerates the methods in the 'page' class
-    #pywikibot.stdout(format(dir(page)))
-
-    if cat_name != '':
-        cat_obj = pywikibot.Category(site, cat_name)
-        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
-        for page in pagegenerators.PreloadingGenerator(generator, 100):
-            pywikibot.stdout('Checking page {0}'.format(page.title()))
-            scan_for_iw_links(page.text)
-    elif page_name != '':
-        page = pywikibot.Page(site, page_name)
-        pywikibot.stdout('Checking page {0}'.format(page.title()))
-        scan_for_iw_links(page.text)
-
-    global pages_checked
-    global iw_found
-    global problems_found
-    pywikibot.stdout('Checked {0} page(s) and found {1} interwiki link(s) with {2} problem(s).'.format(pages_checked, iw_found, problems_found))
-
-if __name__ == '__main__':
-    main()
Index: /ValBot/check_intrawiki_section_links.py
===================================================================
--- /ValBot/check_intrawiki_section_links.py	(revision 1168)
+++ 	(revision )
@@ -1,200 +1,0 @@
-import os
-
-from urllib.parse import urljoin
-
-import pywikibot
-import re
-
-from pywikibot.bot import QuitKeyboardInterrupt
-from pywikibot import pagegenerators
-from pywikibot.tools.formatter import color_format
-from pywikibot.comms.http import fetch
-from pywikibot.specialbots import UploadRobot
-from bs4 import BeautifulSoup
-
-# Array of OniGalore's namespaces
-intrawiki_prefixes = ('Image', 'Special', 'Talk', 'User', 'User_talk', 'OniGalore', 'OniGalore_talk', 'File', 'File_talk', 'MediaWiki', 'MediaWiki_talk', 'Template', 'Template_talk', 'Help', 'Help_talk', 'Category', 'Category_talk', 'BSL', 'BSL_talk', 'OBD', 'OBD_talk', 'AE', 'AE_talk', 'Oni2', 'Oni2_talk', 'XML', 'XML_talk')
-
-# URL for main namespace of our wiki
-onigalore_url = 'https://wiki.oni2.net/'
-
-# Interwiki prefixes, for ruling out these links
-interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
-
-pages_checked = 0
-iw_found = 0
-problems_found = 0
-page_name = ''
-
-# Searches the given page text for intrawiki links with section links in them
-def scan_for_iw_links(page_text):
-    global pages_checked
-    global iw_found
-    global problems_found
-    global page_name
-    pages_checked = pages_checked + 1
-
-    # Isolate strings of pattern "[[anything]]", "[[any:thing]]", "[[any|thing]]" or
-    # "[[any:thi|ng]]"
-    iw_link = "\[\[[^|\]]*(\||\])"
-    for match in re.finditer(iw_link, page_text):
-        found_iw_match = False
-        iw_url = ""
-        page_name2 = page_name
-    
-        # Cut out the matched text from the page, and in the process remove the "[[" from the
-        # front and the "|" or "]" from the end
-        s = match.start() + 2
-        e = match.end() - 1
-        link_text = page_text[s:e]
-
-        # Sometimes we used a space char. instead of a '_', so fix that before querying
-        link_text = link_text.replace(' ', '_')
-        #pywikibot.output('Found link {0}.'.format(link_text))
-        
-        # If this link doesn't have a section link in it, then we don't care about it, as
-        # MediaWiki takes care of checking basic intrawiki links
-        if not '#' in link_text:
-            #pywikibot.output('Link doesn\'t have a section anchor in it. Skipping.')
-            continue
-        
-        # If there is a '{' in the link, then probably it's a link built on transcluded text
-        # like "Quotes/Diary#{{C3}}", which we cannot expand and work with, so skip it
-        if '{' in link_text:
-            pywikibot.output('ADVICE: Link {} seems to use transclusion, so it can\'t be verified automatically. You should check it manually.'.format(link_text))
-            continue
-        
-        # If this is a relative "../" link, find the parent page and set ourselves to that
-        # page, then remove the relative portion of the link. Note that this is only performed
-        # once, so if there's multiple steps back ("../../"), we're out of luck.
-        if link_text.startswith('../'):
-            last_slash = page_name.rfind('/')
-            page_name2 = page_name[0:last_slash]
-            #pywikibot.output('Changed page_name to {} on account of "../".'.format(page_name2))
-            link_text = link_text[3:len(link_text)]
-            #pywikibot.output('Changed link_text to {} on account of "../".'.format(link_text))
-            # If this is now going to be a bare section link for the parent page, don't add
-            # a slash, otherwise do because we are drilling down to another subpage
-            if link_text.startswith('#'):
-                link_text = page_name2 + link_text
-            else:
-                link_text = page_name2 + '/' + link_text
-            
-        # If this is a bare section link, build URL based on this page
-        if link_text.startswith('#'):
-            iw_url = onigalore_url + page_name2
-            iw_found = iw_found + 1
-            #pywikibot.output('Found link to this very page, {}.'.format(link_text))
-            found_iw_match = True
-            link_text = page_name2 + link_text
-        
-        # If there's no ":" in the link (before the section link, where a colon would just be
-        # part of the text) then it's a Main namespace article, so construct URL
-        #if not ':' in link_text:
-        if found_iw_match == False:
-            if not re.search(":.*#", link_text):
-                iw_url = onigalore_url + link_text
-                iw_found = iw_found + 1
-                #pywikibot.output('Found link to OniGalore Main namespace page {}.'.format(link_text))
-                found_iw_match = True
-            
-        # If there is a ":", match the prefix against the intrawiki prefixes on OniGalore
-        if found_iw_match == False:
-            for prefix in intrawiki_prefixes:
-                #pywikibot.output('Comparing link against prefix {}.'.format(prefix))
-                if prefix + ":" in link_text:
-                    iw_url = onigalore_url + link_text
-                    _, post_ns = link_text.split(':', 1)
-                    #pywikibot.output('Found link to OniGalore {0} namespace page {1}.'.format(prefix, post_ns))
-                    iw_found = iw_found + 1
-                    found_iw_match = True
-                    break
-        
-        # If we didn't match the prefix against any intrawiki prefixes, see if it matches
-        # against an interwiki prefix; if so, this link can be ignored
-        is_interwiki = False
-        if found_iw_match == False:
-            for prefix in interwiki_prefixes:
-                if prefix + ":" in link_text:
-                    #pywikibot.output('Skipping link {} because it is an interwiki link.'.format(link_text))
-                    is_interwiki = True
-                    break
-        if is_interwiki:
-            continue
-        
-        # If we still haven't turned this match into a URL, something's gone wrong
-        if (found_iw_match == False) or (iw_url == ""):
-            pywikibot.output('ERROR: Couldn\'t figure out link {}. Aborting script.'.format(link_text))
-            quit()
-
-        # Test the URL
-        iw_url = iw_url.replace(' ', '_')
-        #pywikibot.output('Reading page at {}...'.format(iw_url))
-        response = fetch(iw_url)
-
-        # Redirects are followed automatically by fetch() and treated as "200"s, so the
-        # way we tell that a redirect occurred is by checking the history
-        if response.history != []:
-            pywikibot.output('WARNING: Redirected from {}.'.format(response.history))
-            problems_found = problems_found + 1
-        elif response.status_code != 200:
-            #pywikibot.output('WARNING: Got response code {}.'.format(response.status_code)) # commented out because fetch() already prints such a msg
-            problems_found = problems_found + 1
-        else:
-            # Isolate section link
-            pre_section, section_name = link_text.split('#', 1)
-            #pywikibot.output('Searching for section link {} on page.'.format(section_name))
-            
-            # Convert slash character to the dot-notation hex encoding that MediaWiki uses
-            section_name = section_name.replace('/', '.2F')
-            
-            # Read linked page to see if it really has this anchor link
-            soup = BeautifulSoup(response.text, 'html.parser')
-            found_section = False
-            for span_tag in soup.findAll('span'):
-                span_name = span_tag.get('id', None)
-                if span_name == section_name:
-                    #pywikibot.output('Found section!')
-                    found_section = True
-                    break
-            if found_section == False:
-                pywikibot.output('ERROR: Could not find section {0} on page {1}!'.format(section_name, pre_section))
-                problems_found = problems_found + 1
-
-def main(*args):
-    cat_name = ''
-    global page_name
-
-    local_args = pywikibot.handle_args(args)
-    genFactory = pagegenerators.GeneratorFactory()
-
-    for arg in local_args:
-        if arg.startswith('-cat:'):
-            cat_name = arg[5:]
-        elif arg.startswith('-page:'):
-            page_name = arg[6:]
-
-    site = pywikibot.Site()
-
-    # This line of code enumerates the methods in the 'page' class
-    #pywikibot.stdout(format(dir(page)))
-
-    if cat_name != '':
-        cat_obj = pywikibot.Category(site, cat_name)
-        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
-        for page in pagegenerators.PreloadingGenerator(generator, 100):
-            pywikibot.stdout('Checking page {0}'.format(page.title()))
-            page_name = page.title()
-            scan_for_iw_links(page.text)
-    elif page_name != '':
-        page = pywikibot.Page(site, page_name)
-        pywikibot.stdout('Checking page {0}'.format(page.title()))
-        scan_for_iw_links(page.text)
-
-    global pages_checked
-    global iw_found
-    global problems_found
-    pywikibot.stdout('Checked {0} page(s) and found {1} intrawiki link(s) with {2} section link problem(s).'.format(pages_checked, iw_found, problems_found))
-
-if __name__ == '__main__':
-    main()
Index: /ValBot/drive_https_upgrade.sh
===================================================================
--- /ValBot/drive_https_upgrade.sh	(revision 1168)
+++ 	(revision )
@@ -1,51 +1,0 @@
-#!/bin/bash
-
-IFS="
-"
-
-CORE="/path/to/Pywikibot/core"
-SUMMARY="changing link from http->https"
-RATE=6
-FIX_START=551
-FIX_END=650
-
-cd "$CORE"
-if [ ! -f "pwb.py" ]; then
-   echo "drive_https_upgrade.sh: Can't launch Pywikibot!"
-   exit
-fi
-
-echo "drive_https_upgrade.sh: Starting at fix $FIX_START..."
-
-FIX_CUR=0
-LAST_RUN=0
-for THE_LINE in `cat "/path/to/ValExtLinks report.txt"`; do
-   if [[ $THE_LINE =~ .*http-\>https.* ]]; then
-      let FIX_CUR+=1
-      if [ $FIX_CUR -lt $FIX_START ]; then
-         continue
-      fi
-      if [ $FIX_CUR -gt $FIX_END ]; then
-         echo "drive_https_upgrade.sh: Stopped after fix $FIX_END."
-         exit
-      fi
-
-      # Wait for rate limit to expire if we have run the Python script before in this session
-      if [ $LAST_RUN -gt 0 ]; then
-         CUR_TIME=$(date +%s)
-         WAIT_REMAINDER=$(($RATE - $CUR_TIME + $LAST_RUN))
-         if [ $WAIT_REMAINDER -gt 0 ]; then
-            echo "drive_https_upgrade.sh: Waiting $WAIT_REMAINDER second(s)."
-            sleep $WAIT_REMAINDER
-         fi
-      fi
-      ON_PAGE=${THE_LINE#*page \'}
-      ON_PAGE=${ON_PAGE%%\'*}
-      FROM_LINK=${THE_LINE#*URL \'}
-      FROM_LINK=${FROM_LINK%%\'*}
-      TO_LINK=${THE_LINE%\'*}
-      TO_LINK=${TO_LINK##*\'}
-      LAST_RUN=$(date +%s)
-      python pwb.py replace -page:"$ON_PAGE" "$FROM_LINK" "$TO_LINK" -summary:"$SUMMARY" -always
-   fi
-done
Index: /ValBot/drive_slash_adding.sh
===================================================================
--- /ValBot/drive_slash_adding.sh	(revision 1168)
+++ 	(revision )
@@ -1,62 +1,0 @@
-#!/bin/bash
-
-IFS="
-"
-
-CORE="/path/to/Pywikibot/core"
-SUMMARY="added ending slash to URL and/or upgrading http to https to satisfy redirect"
-RATE=6
-FIX_START=0
-FIX_END=0
-
-cd "$CORE"
-if [ ! -f "pwb.py" ]; then
-   echo "drive_slash_adding.sh: Can't launch Pywikibot!"
-   exit
-fi
-
-echo "drive_slash_adding.sh: Starting at fix $FIX_START..."
-
-FIX_CUR=0
-LAST_RUN=0
-for THE_LINE in `cat "/path/to/ValExtLinks report.txt"`; do
-   #echo "drive_slash_adding.sh: Considering '$THE_LINE'..."
-   if [[ "$THE_LINE" =~ .*trailing.* ]] && [[ ! "$THE_LINE" =~ .*w/index.php.* ]]; then
-      #echo "drive_slash_adding.sh: This URL needs to be fixed."
-      let FIX_CUR+=1
-
-      if [ $FIX_CUR -lt $FIX_START ]; then
-         continue
-      fi
-
-      if [ $FIX_END -gt 0 ] && [ $FIX_CUR -gt $FIX_END ]; then
-         echo "drive_slash_adding.sh: Stopped after fix $FIX_END."
-         exit
-      fi      
-
-      # Wait for rate limit to expire if we have run the Python script before in this session
-      if [ $LAST_RUN -gt 0 ]; then
-         CUR_TIME=$(date +%s)
-         WAIT_REMAINDER=$(($RATE - $CUR_TIME + $LAST_RUN))
-         if [ $WAIT_REMAINDER -gt 0 ]; then
-            echo "drive_slash_adding.sh: Waiting $WAIT_REMAINDER second(s)."
-            sleep $WAIT_REMAINDER
-         fi
-      fi
-      ON_PAGE=${THE_LINE#*page \'}
-      ON_PAGE=${ON_PAGE%%\'*}
-      FROM_LINK=${THE_LINE#*URL \'}
-      FROM_LINK=${FROM_LINK%%\'*}
-      TO_LINK=${THE_LINE%\'*}
-      TO_LINK=${TO_LINK##*\'}
-
-      #if [[ "$THE_LINE" =~ ${FROM_LINK}[^a-zA-Z/] ]]; then
-      #   echo "URL is not isolated, skipping."
-      #   continue
-      #fi
-
-      LAST_RUN=$(date +%s)
-      echo "pwb.by replace '-page:\"$ON_PAGE\" \"$FROM_LINK\" \"$TO_LINK\""
-      python pwb.py replace -page:"$ON_PAGE" "$FROM_LINK" "$TO_LINK" -summary:"$SUMMARY"
-   fi
-done
Index: /ValBot/find_external_images.py
===================================================================
--- /ValBot/find_external_images.py	(revision 1168)
+++ 	(revision )
@@ -1,89 +1,0 @@
-import os
-
-from urllib.parse import urljoin
-
-import pywikibot
-
-from pywikibot.bot import QuitKeyboardInterrupt
-from pywikibot import pagegenerators
-from pywikibot.comms.http import fetch
-from pywikibot.specialbots import UploadRobot
-from bs4 import BeautifulSoup
-
-first_run = False
-pages_checked = 0
-oni2_images = 0
-file_formats = ('.jpg', '.jpeg', '.png', '.gif', '.svg')
-
-# Scrapes the HTML at the given URL for image tags
-def get_image_links(url, shown):
-    links = []
-    global oni2_images
-    global pages_checked
-
-    response = fetch(url)
-    if response.status_code != 200:
-        pywikibot.output('Skipping url: {}'.format(url))
-        return links
-
-    soup = BeautifulSoup(response.text, 'html.parser')
-    pages_checked = pages_checked + 1
-    if not shown:
-        tagname = 'a'
-    elif shown == 'just':
-        tagname = 'img'
-    else:
-        tagname = ['a', 'img']
-    #pywikibot.output('Looking at tags.')
-    for tag in soup.findAll(tagname):
-        link = tag.get('src', tag.get('href', None))
-        if not link:
-            #pywikibot.output('It is not a link.')
-            continue
-        #pywikibot.output('Got link {0}.'.format(link))
-        _, ext = os.path.splitext(link)
-        if ext.lower() in file_formats:
-            pywikibot.output('Found image link {0}.'.format(ext))
-            if "oni2.net" in link:
-                pywikibot.stdout('Found an oni2.net image: {0}'.format(link))
-                oni2_images = oni2_images + 1
-    return links
-
-
-def main(*args):
-    cat = ''
-    url = ''
-    image_url = False
-    shown = False
-    desc = []
-
-    local_args = pywikibot.handle_args(args)
-    genFactory = pagegenerators.GeneratorFactory()
-
-    for arg in local_args:
-        if arg.startswith('-cat:'):
-            cat = arg[5:]
-        elif arg == '-shown':
-            shown = True
-        elif arg == '-justshown':
-            shown = 'just'
-        elif url == '':
-            url = arg
-        else:
-            desc += [arg]
-    desc = ' '.join(desc)
-
-    site = pywikibot.Site()
-    cat_obj = pywikibot.Category(site, cat)
-    generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
-    for page in pagegenerators.PreloadingGenerator(generator, 100):
-        pywikibot.stdout('Checking page {0}'.format(page.title()))
-        page_url = page.full_url().replace("%2F", "/")
-        get_image_links(page_url, shown)
-
-    global pages_checked
-    global oni2_images
-    pywikibot.stdout('Checked {0} page(s) and found {1} image(s) from oni2.net.'.format(pages_checked, oni2_images))
-
-if __name__ == '__main__':
-    main()
Index: /ValBot/uncapitalize_image_suffixes.py
===================================================================
--- /ValBot/uncapitalize_image_suffixes.py	(revision 1168)
+++ 	(revision )
@@ -1,57 +1,0 @@
-import os
-import pywikibot
-
-from pywikibot.bot import QuitKeyboardInterrupt
-from pywikibot import pagegenerators
-
-def main(*args):
-    genFactory = pagegenerators.GeneratorFactory()
-    #allowed_suffixes = ('jpg', 'jpeg', 'png', 'gif', 'svg')
-    allowed_suffixes = []
-    images_checked = 0
-    capped_suffixes = 0
-    site = pywikibot.Site()
-
-    try:
-        allowed_suffixes = site.siteinfo.get('fileextensions', get_default=False)
-    except KeyError:
-        pywikibot.stdout('Failed to get the wiki\'s allowed image suffixes!')
-        return
-    else:
-        allowed_suffixes = [item['ext'].lower() for item in allowed_suffixes]
-        pywikibot.stdout('Wiki accepts image suffixes {0}.'.format(allowed_suffixes))
-
-    generator = site.allimages()
-    pywikibot.stdout('Looking for images with capitalized suffixes...')
-    for page in pagegenerators.PreloadingGenerator(generator, 100):
-        images_checked = images_checked + 1
-        suffix = page.title().split('.')[-1]
-        if suffix.lower() in allowed_suffixes:
-            if suffix != suffix.lower():
-                # We found a page which has an allowed suffix but which is capitalized
-                pywikibot.stdout('{0}'.format(page.title()))
-                capped_suffixes = capped_suffixes + 1
-
-                # Rename page to have lowercase suffix
-                new_page_title = os.path.splitext(page.title())[0] + '.' + suffix.lower()
-                pywikibot.stdout('Moving page to {0}...'.format(new_page_title))
-                page.move(new_page_title, reason='use lowercase file suffix', movetalk=True, noredirect=True)
-
-                # Warn the user if the page we moved had references to it
-                wiki_links = []
-                wiki_links = list(page.backlinks())
-                file_links = []
-                file_links = list(page.usingPages())
-                if len(wiki_links) or len(file_links):
-                    pywikibot.stdout('However this page is referenced by:')
-                    for ref in wiki_links:
-                        pywikibot.stdout('    {0}'.format(ref.title()))
-                    for ref in file_links:
-                        pywikibot.stdout('    {0}'.format(ref.title()))
-        else:
-            pywikibot.stdout('Found disallowed suffix {0}!'.format(suffix)) # should never happen
-
-    pywikibot.stdout('Checked {0} images and found {1} with capitalized suffixes.'.format(images_checked, capped_suffixes))
-
-if __name__ == '__main__':
-    main()
