Index: /ValBot/Python/check_interwiki_links.py
===================================================================
--- /ValBot/Python/check_interwiki_links.py	(revision 1184)
+++ /ValBot/Python/check_interwiki_links.py	(revision 1185)
@@ -13,4 +13,5 @@
 
 import pywikibot
+import bs4
 import re
 import requests # for listing members with dir()
@@ -33,4 +34,119 @@
 iw_found = 0
 errors_issued = 0
+name_printed = 0
+
+# Print the name of a page on which something noteworthy occurred, unless it has already been printed
+def possibly_print(page_name):
+   global debug
+   global name_printed
+   
+   if not name_printed and not debug:
+      pywikibot.stdout('')
+      pywikibot.stdout('From page "{}":'.format(page_name))
+      name_printed = 1
+
+# Search a page for the section specified in the link
+def find_section(page_text, page_name, page_slug, prefix, print_result):
+   global errors_issued
+
+   # Isolate section link
+   target_page_name, anchor_name = page_slug.split('#')
+   target_page_name_human = target_page_name.replace('_', ' ')
+   
+   # Convert dot-notation hex entities to proper characters
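+   # For example, a quotation mark (hex 22) appears in a MediaWiki anchor as ".22"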
+   anchor_name = anchor_name.replace('.22', '"')
+   anchor_name = anchor_name.replace('.27', '\'')
+   anchor_name = anchor_name.replace('.28', '(')
+   anchor_name = anchor_name.replace('.29', ')')
+   
+   # Read linked page to see if it really has this anchor link
+   soup = BeautifulSoup(page_text, 'html.parser')
+   found_section = False
+   for span_tag in soup.findAll('span'): # search for span with ID matching the section name
+      span_name = span_tag.get('id', None)
+      if span_name == anchor_name:
+         found_section = True
+         break
+   if found_section == False:
+      for span_tag in soup.findAll('div'): # search for div with ID matching the section name
+         span_name = span_tag.get('id', None)
+         if span_name == anchor_name:
+            found_section = True
+            break
+   if found_section == False:
+      possibly_print(page_name)
+      pywikibot.stdout('   ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
+      errors_issued = errors_issued + 1
+   elif print_result == True:
+      pywikibot.stdout('   The section "{0}" was found on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
+
+# For a link that redirected us to another page, extract the name of the target page from
+# the target page's source
+def find_canonical_link(page_text, page_name, page_slug, prefix, prefix_url):
+   global errors_issued
+
+   # Extract link from this markup which contains name of redirected-to page:
+   # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
+   canonical_name = page_text.split('<link rel="canonical" href="')[-1]
+   prefix_length = len(prefix_url)
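+   # Strip the wiki's base URL so that only the target page's name remains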
+   canonical_name = canonical_name[prefix_length:]
+   tag_end = canonical_name.find('">')
+   
+   if tag_end == -1:
+      pywikibot.stdout('   ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(prefix, page_slug))
+      errors_issued = errors_issued + 1
+   else:
+      canonical_name = canonical_name[:tag_end]
+      if len(canonical_name) > 100:
+         # Certain things can cause the trim to fail; report error and avoid slamming the
+         # output with massive page source from a failed trim
+         pywikibot.stdout('   ERROR: The {0} link "{1}" is a redirect to "{2}…" (string overflow).'.format(prefix, page_slug, canonical_name[:100]))
+         errors_issued = errors_issued + 1
+      else:
+         canonical_name = canonical_name.replace('_', ' ')
+         if '#' in page_slug:
+            _, anchor_name = page_slug.split('#')
+            pywikibot.stdout('   The {0} link "{1}" is a redirect to "{2}#{3}", which is a valid page. Checking section link….'.format(prefix, page_slug, canonical_name, anchor_name))
+            find_section(page_text, page_name, page_slug, prefix, True)
+         else:
+            pywikibot.stdout('   The {0} link "{1}" is a redirect to "{2}", which is a valid page.'.format(prefix, page_slug, canonical_name))
+
+# Test an interwiki link and look for a section link if applicable
+def test_interwiki_link(prefix, prefix_url, iw_url, page_name, page_slug):
+   global errors_issued
+   
+   response = fetch(iw_url)
+
+   # One way we tell that a redirect occurred is by checking fetch's history, as it
+   # automatically follows redirects. This will catch formal redirects which come from pages
+   # such as Special:PermanentLink.
+   if response.history != []:
+      possibly_print(page_name)
+         
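+      # An all-caps "WP:" slug is assumed to be a deliberate Wikipedia shortcut (e.g. "WP:NPOV"), which redirects by design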
+      if page_slug.startswith('WP:') and page_slug == page_slug.upper():
+         pywikibot.stdout('   Got redirection code "{0}" for {1} link "{2}". This appears to be a deliberate use of a Wikipedia shortcut. Checking the target page….'.format(response.history[0], prefix, page_slug))
+         find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
+      else:
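+         # Permanent-revision links ("Special:PermanentLink/…", "Special:Permalink/…") also redirect by design, so match them case-insensitively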
+         permalink1 = 'Special:PermanentLink/'.lower()
+         permalink2 = 'Special:Permalink/'.lower()
+         page_slug_lower = page_slug.lower()
+         if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
+            pywikibot.stdout('   Got redirection code "{0}" for {1} permanent revision link "{2}". Checking the target page….'.format(response.history[0], prefix, page_slug))
+            find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
+         else:
+            pywikibot.stdout('   ERROR: Unrecognized type of redirection (code "{0}") for {1} link "{2}". You should check the link manually.'.format(response.history[0], prefix, page_slug))
+            errors_issued = errors_issued + 1
+   elif response.status_code != 200:
+      possibly_print(page_name)
+      pywikibot.stdout('   ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(response.status_code, prefix, page_slug))
+      errors_issued = errors_issued + 1
+   # However the usual way that a redirect occurs is that MediaWiki redirects us sneakily
+   # using JavaScript, while returning code OK 200 as if the link was correct; this happens
+   # when a redirect page is accessed. We must detect these soft redirects by looking at the
+   # page source to find the redirect note inserted at the top of the page for the reader.
+   elif 'Redirected from <a' in response.text:
+      possibly_print(page_name)
+      pywikibot.stdout('   Got silently redirected by {0} link "{1}". Checking the target page….'.format(prefix, page_slug))
+      find_canonical_link(response.text, page_name, page_slug, prefix, prefix_url)
+   elif '#' in page_slug:
+      find_section(response.text, page_name, page_slug, prefix, False)
 
 # Searches the given page text for interwiki links
@@ -39,7 +155,7 @@
    global pages_checked
    global iw_found
-   global errors_issued
+   global name_printed
    pages_checked = pages_checked + 1
-   cur = 0
+   cur_prefix = 0
    name_printed = 0
 
@@ -52,94 +168,50 @@
          e = match.end() - 1
 
-         # Sometimes we used a space char. instead of a '_', so fix that before querying
-         page_title = page_text[s:e].replace(' ', '_')
-
-         # Use only spaces for title when printing it
-         page_title_human = page_title.replace('_', ' ')
+         # Commonly we use spaces instead of underscores, so fix that before querying
+         page_slug = page_text[s:e].replace(' ', '_')
+
+         # But use spaces for title when printing it
+         page_title_human = page_slug.replace('_', ' ')
          if debug: pywikibot.stdout('   Validating {0} link "{1}"'.format(prefix, page_title_human))
          iw_found = iw_found + 1
 
          # Construct full URL for the particular wiki
-         iw_url = interwiki_urls[cur] + page_title
+         iw_url = interwiki_urls[cur_prefix] + page_slug
 
          # Adjust URL if this is a foreign-language WP link
-         if re.match("^[a-zA-Z]{2}:", page_title):
-            lang_code = page_title[0:2] + "."
+         if re.match("^[a-zA-Z]{2}:", page_slug):
+            lang_code = page_slug[0:2] + "."
             # "wp:" is the Wikipedia: namespace, not a language
             if lang_code != "wp." and lang_code != "WP.":
                iw_url = iw_url.replace('en.', lang_code)
-               iw_url = iw_url.replace(page_title[0:3], '')
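+               # Strip the two-letter language prefix (e.g. "fr:") from the slug portion of the URL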
+               iw_url = iw_url.replace(page_slug[0:3], '')
 
          # Test the URL
-         response = fetch(iw_url)
-
-         # One way we tell that a redirect occurred is by checking the history
-         if response.history != []:
-            if not name_printed and not debug:
-               pywikibot.stdout('From page "{}":'.format(page_name))
-               name_printed = 1
-            if page_title.startswith('WP:') and page_title == page_title.upper():
-               pywikibot.stdout('   ERROR: Got redirection code ({0}) for {1} link "{2}", but this appears to be a deliberate use of a Wikipedia shortcut. You should check the link manually.'.format(response.history[0], prefix, page_title))
-            else:
-               pywikibot.stdout('   ERROR: Got redirection code ({0}) for {1} link "{2}". You should check the link manually.'.format(response.history[0], prefix, page_title))
-            errors_issued = errors_issued + 1
-         elif response.status_code != 200:
-            if not name_printed and not debug:
-               pywikibot.stdout('From page "{}":'.format(page_name))
-               name_printed = 1
-            pywikibot.stdout('   ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(response.status_code, prefix, page_title))
-            errors_issued = errors_issued + 1
-         # The usual way that a redirect occurs is that MediaWiki redirects us sneakily
-         # using JavaScript, while returning code OK 200 as if the link was correct; we
-         # must detect this from the page source
-         elif 'Redirected from <a' in response.text:
-            if not name_printed and not debug:
-               pywikibot.stdout('From page "{}":'.format(page_name))
-               name_printed = 1
-            # Extract link from this source which contains name of redirected-to page:
-            # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
-            canonical_name = response.text.split('<link rel="canonical" href="')[-1]
-            prefix_length = len(interwiki_urls[cur])
-            canonical_name = canonical_name[prefix_length:]
-            tag_end = canonical_name.find('"/>')
-            if tag_end == -1:
-               pywikibot.stdout('   ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.', format(prefix, page_title))
-            else:
-               canonical_name = canonical_name[:tag_end]
-               if len(canonical_name) > 100:
-                 # Certain things can cause the trim to fail; here we avoid slamming
-                 # the output with massive page source from a failed trim
-                 pywikibot.stdout('   ERROR: The {0} link "{1}" is a redirect to "{2}…" (string trimmed to 100 chars).'.format(prefix, page_title, canonical_name[:100]))
-               else:
-                 canonical_name = canonical_name.replace('_', ' ')
-                 pywikibot.stdout('   ERROR: The {0} link "{1}" is a redirect to "{2}".'.format(prefix, page_title, canonical_name))
-            errors_issued = errors_issued + 1
-         elif '#' in page_title:
-            # Isolate section link
-            target_page_name, anchor_name = page_title.split('#')
-            
-            # Convert dot-notation hex entities to proper characters
-            anchor_name = anchor_name.replace('.22', '"')
-            anchor_name = anchor_name.replace('.27', '\'')
-            anchor_name = anchor_name.replace('.28', '(')
-            anchor_name = anchor_name.replace('.29', ')')
-            
-            # Read linked page to see if it really has this anchor link
-            soup = BeautifulSoup(response.text, 'html.parser')
-            found_section = False
-            for span_tag in soup.findAll('span'):
-               span_name = span_tag.get('id', None)
-               if span_name == anchor_name:
-                  found_section = True
-                  break
-            if found_section == False:
-               if not name_printed and not debug:
-                  pywikibot.stdout('From page "{}":'.format(page_name))
-                  name_printed = 1
-               target_page_name_human = target_page_name.replace('_', ' ')
-               pywikibot.stdout('   ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
-               errors_issued = errors_issued + 1
-      cur = cur + 1
-
+         test_interwiki_link(prefix, interwiki_urls[cur_prefix], iw_url, page_name, page_slug)
+      cur_prefix = cur_prefix + 1
+
+# Print a wrap-up message
+def print_summary():
+   global pages_checked
+   global iw_found
+   global errors_issued
+
+   page_str = "pages"
+   if pages_checked == 1:
+      page_str = "page"
+
+   link_str = "links"
+   if iw_found == 1:
+      link_str = "link"
+
+   pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
+
+   error_str = "errors were"
+   if errors_issued == 1:
+      error_str = "error was"
+
+   pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
+
+# Main function
 def main(*args):
    global debug
@@ -147,7 +219,6 @@
    search_page = ''
 
+   # Process arguments
    local_args = pywikibot.handle_args(args)
-   genFactory = pagegenerators.GeneratorFactory()
-
    for arg in local_args:
       if arg.startswith('-cat:'):
@@ -158,12 +229,13 @@
          debug = 1
       else:
-         pywikibot.stdout('Unknown argument "{}".'.format(arg))
+         pywikibot.stdout('Unknown argument "{}". Exiting.'.format(arg))
          return
-
-   site = pywikibot.Site()
 
    #pywikibot.stdout('The members of the requests.models.Response class are:')
    #pywikibot.stdout(format(dir(requests.models.Response)))
-
+   #return
+   
+   # Check specified page or loop through specified category and check all pages
+   site = pywikibot.Site()
    if search_cat != '':
       cat_obj = pywikibot.Category(site, search_cat)
@@ -177,23 +249,6 @@
       scan_for_interwiki_links(page.text, page.title())
 
-   global pages_checked
-   global iw_found
-   global errors_issued
-
-   page_str = "pages"
-   if pages_checked == 1:
-      page_str = "page"
-
-   link_str = "links"
-   if iw_found == 1:
-      link_str = "link"
-
-   pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
-
-   error_str = "errors were"
-   if errors_issued == 1:
-      error_str = "error was"
-
-   pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
+   # Print the results
+   print_summary()
 
 if __name__ == '__main__':
Index: /ValBot/Python/check_intrawiki_section_links.py
===================================================================
--- /ValBot/Python/check_intrawiki_section_links.py	(revision 1184)
+++ /ValBot/Python/check_intrawiki_section_links.py	(revision 1185)
@@ -27,5 +27,5 @@
 onigalore_url = 'https://wiki.oni2.net/'
 
-# Tuple of interwiki prefixes, for passing over such links
+# Tuple of interwiki prefixes, for recognizing and passing over such links
 interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
 
@@ -44,4 +44,119 @@
 advice_issued = 0
 errors_issued = 0
+name_printed = 0
+
+# Print the name of a page on which something noteworthy occurred, unless it has already been printed
+def possibly_print(page_name):
+   global debug
+   global name_printed
+
+   if not name_printed and not debug:
+      pywikibot.stdout('')
+      pywikibot.stdout('From page "{}":'.format(page_name))
+      name_printed = 1
+      
+# Search a page for the section specified in the link
+def find_section(page_text, page_name, page_slug, print_result):
+   global errors_issued
+
+   # Isolate section link
+   target_page_name, anchor_name = page_slug.split('#', 1)
+   target_page_name_human = target_page_name.replace('_', ' ')
+   if debug: pywikibot.stdout('         Searching for section link {} on page.'.format(anchor_name))
+
+   # Convert slash character to the dot-notation hex encoding that MediaWiki uses
+   anchor_name = anchor_name.replace('/', '.2F')
+
+   # Read linked page to see if it really has this anchor link
+   soup = BeautifulSoup(page_text, 'html.parser')
+   found_section = False
+   for span_tag in soup.findAll('span'):
+      span_name = span_tag.get('id', None)
+      if span_name == anchor_name:
+         if debug and not print_result: pywikibot.stdout('         Found section in a span!')
+         found_section = True
+         break
+   if found_section == False:
+      # Search for a div with this ID
+      for span_tag in soup.findAll('div'):
+         span_name = span_tag.get('id', None)
+         if span_name == anchor_name:
+            if debug and not print_result: pywikibot.stdout('         Found section in a div!')
+            found_section = True
+            break
+   if found_section == False:
+      possibly_print(page_name)
+      pywikibot.stdout('   ERROR: Could not find section "{0}" on page "{1}"!'.format(anchor_name, target_page_name_human))
+      errors_issued += 1
+   elif debug and print_result:
+      pywikibot.stdout('   The section "{0}" was found on page "{1}".'.format(anchor_name, target_page_name_human))
+
+# For a link that redirected us to another page, extract the name of the target page from
+# the target page's source
+def find_canonical_link(page_text, page_name, page_slug):
+   global errors_issued
+
+   # Extract the name of the redirected-to page from this markup in the target page's source:
+   # "wgPageName":"Namespace:Page_name",
+   canonical_name = page_text.split('"wgPageName":"')[-1]
+   tag_end = canonical_name.find('",')
+   
+   if tag_end == -1:
+      pywikibot.stdout('   ERROR: The link "{}" is a redirect page, but this script could not isolate the target page name.'.format(page_slug))
+      errors_issued = errors_issued + 1
+   else:
+      canonical_name = canonical_name[:tag_end]
+      if len(canonical_name) > 100:
+         # Certain things can cause the trim to fail; report error and avoid slamming the
+         # output with massive page source from a failed trim
+         pywikibot.stdout('   ERROR: The link "{0}" is a redirect to "{1}…" (string overflow).'.format(page_slug, canonical_name[:100]))
+         errors_issued = errors_issued + 1
+      else:
+         canonical_name = canonical_name.replace('_', ' ')
+         if '#' in page_slug:
+            _, anchor_name = page_slug.split('#')
+            if debug: pywikibot.stdout('   The link "{0}" is a redirect to "{1}#{2}", which is a valid page. Checking section link….'.format(page_slug, canonical_name, anchor_name))
+            find_section(page_text, page_name, page_slug, True)
+         else:
+            pywikibot.stdout('   The link "{0}" is a redirect to "{1}", which is a valid page.'.format(page_slug, canonical_name))
+
+# Test an intrawiki link and look for a section link if applicable
+def test_intrawiki_link(iw_url, page_name, page_slug):
+   global advice_issued
+   global errors_issued
+   
+   response = fetch(iw_url)
+
+   # One way we tell that a redirect occurred is by checking fetch's history, as it
+   # automatically follows redirects. This will catch formal redirects which come from pages
+   # such as Special:PermanentLink.
+   if response.history != []:
+      
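+      # Permanent-revision links ("Special:PermanentLink/…", "Special:Permalink/…") redirect by design, so match them case-insensitively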
+      permalink1 = 'Special:PermanentLink/'.lower()
+      permalink2 = 'Special:Permalink/'.lower()
+      page_slug_lower = page_slug.lower()
+      if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
+         if debug:
+            possibly_print(page_name)
+            pywikibot.stdout('   Got redirection code "{0}" for permanent revision link "{1}". Checking the target page….'.format(response.history[0], page_slug))
+         find_canonical_link(response.text, page_name, page_slug)
+      else:
+         possibly_print(page_name)
+         pywikibot.stdout('   ERROR: Unrecognized type of redirection (code "{0}") for link "{1}". You should check the link manually.'.format(response.history[0], page_slug))
+         errors_issued += 1
+   elif response.status_code != 200:
+      possibly_print(page_name)
+      pywikibot.stdout('   ERROR: Got response code {0} on URL {1}. The target page may not exist.'.format(response.status_code, iw_url))
+      errors_issued += 1
+   # However the usual way that a redirect occurs is that MediaWiki redirects us sneakily
+   # using JavaScript, while returning code OK 200 as if the link was correct; this happens
+   # when a redirect page is accessed. We must detect these soft redirects by looking at the
+   # page source to find the redirect note inserted at the top of the page for the reader.
+   elif 'Redirected from <a' in response.text:
+      if debug:
+         possibly_print(page_name)
+         pywikibot.stdout('   Got silently redirected by link "{}". Checking the target page….'.format(page_slug))
+      find_canonical_link(response.text, page_name, page_slug)
+   else: # URL is OK, so proceed
+      find_section(response.text, page_name, page_slug, False)
 
 # Searches the given page text for intrawiki links with section links in them
@@ -52,4 +167,5 @@
    global advice_issued
    global errors_issued
+   global name_printed
    pages_checked += 1
    name_printed = 0
@@ -75,17 +191,17 @@
          s = match.start() + target_start # remove the link-opening markup
          e = match.end() - target_end # remove the link-ending markup
-         link_text = page_text[s:e]
+         page_slug = page_text[s:e]
          
          # The second link type will look like "Page|Section" or "|Section", so fix that pipe
          if i == 1:
-            link_text = link_text.replace('|', '#')
+            page_slug = page_slug.replace('|', '#')
 
          # Sometimes we use a space char. instead of a '_', so fix that before querying
-         link_text = link_text.replace(' ', '_')
-         if debug: pywikibot.stdout('      Found link {0}.'.format(link_text))
+         page_slug = page_slug.replace(' ', '_')
+         if debug: pywikibot.stdout('      Found link {0}.'.format(page_slug))
       
          # If this link doesn't have a section link in it, then we don't care about it, as
          # MediaWiki takes care of checking basic intrawiki links
-         if not '#' in link_text:
+         if not '#' in page_slug:
             if debug: pywikibot.stdout('         Link doesn\'t have a section anchor in it. Skipping.')
             continue
@@ -96,6 +212,6 @@
          if found_iw_match == False:
             for prefix in interwiki_prefixes:
-               if prefix + ":" in link_text:
-                  if debug: pywikibot.stdout('         Skipping link {} because it is an interwiki link.'.format(link_text))
+               if prefix + ":" in page_slug:
+                  if debug: pywikibot.stdout('         Skipping link {} because it is an interwiki link.'.format(page_slug))
                   is_interwiki = True
                   break
@@ -103,9 +219,10 @@
             continue
       
-         # If there is a '{' in the link, then probably it's a link built on transcluded text
-         # like "Quotes/Diary#{{C3}}", which we cannot expand and work with, so skip it
-         if '{' in link_text:
+         # If there is a '{' in the link, then probably it's a link built on transcluded text.
+         # If it's a chapter template transclusion like "Quotes/Diary#{{C3}}", expand it using
+         # our "chapter_names" array. If it's another type of transclusion, punt it to the user.
+         if '{' in page_slug:
             ch_link_pattern = re.compile(r"{{C[0-9]*}}")
-            ch_link = ch_link_pattern.search(link_text)
+            ch_link = ch_link_pattern.search(page_slug)
             if debug: pywikibot.stdout('         Found transclusion in link: "{}".'.format(ch_link.group(0)))
             if ch_link:
@@ -118,25 +235,19 @@
                      ch_name = chapter_names[ch_num_match]
                      replace_pattern = re.compile(r"{{C" + ch_num.group(0) + r"}}")
-                     link_text = replace_pattern.sub(ch_name, link_text)
-                     if debug: pywikibot.stdout('         After performing transclusion, link is now "{}".'.format(link_text))
+                     page_slug = replace_pattern.sub(ch_name, page_slug)
+                     if debug: pywikibot.stdout('         After performing transclusion, link is now "{}".'.format(page_slug))
                   else:
-                     if not name_printed and not debug:
-                        pywikibot.stdout('From page "{}":'.format(page_name))
-                        name_printed = 1
-                     pywikibot.stdout('   ADVICE: Link {0} transcludes a chapter name using an out-of-range number, {1}.'.format(link_text, ch_num_match))
-                     advice_issued += 1
+                     possibly_print(page_name)
+                     pywikibot.stdout('   ERROR: Link {0} transcludes a chapter name using an out-of-range number, {1}.'.format(page_slug, ch_num_match))
+                     errors_issued += 1
                      continue
                else:
-                  if not name_printed and not debug:
-                     pywikibot.stdout('From page "{}":'.format(page_name))
-                     name_printed = 1
-                  pywikibot.stdout('   ADVICE: Link {} seems to be transcluding a chapter name, but this script couldn\'t read it.'.format(link_text))
+                  possibly_print(page_name)
+                  pywikibot.stdout('   ADVICE: Link {} seems to be transcluding a chapter name, but this script couldn\'t read it.'.format(page_slug))
                   advice_issued += 1
                   continue
             else:
-               if not name_printed and not debug:
-                  pywikibot.stdout('From page "{}":'.format(page_name))
-                  name_printed = 1
-               pywikibot.stdout('   ADVICE: Link {0} seems to use transclusion. This script can understand chapter name transclusions such as "{1}" but it doesn\'t recognize this one so it can\'t be verified. You should check the link manually.'.format(link_text, "{{C7}}"))
+               possibly_print(page_name)
+               pywikibot.stdout('   ADVICE: Link {0} seems to use transclusion. This script can understand chapter name transclusions such as "{1}" but it doesn\'t recognize this one so it can\'t be verified. You should check the link manually.'.format(page_slug, "{{C7}}"))
                advice_issued += 1
                continue
@@ -145,37 +256,37 @@
          # that only a leading slash is looked for, so if there's multiple steps down ("/x/y"),
          # we're out of luck.
-         if link_text.startswith('/'):
-            link_text = page_name + link_text
-            if debug: pywikibot.stdout('         Changed link_text to {} on account of "/".'.format(link_text))
+         if page_slug.startswith('/'):
+            page_slug = page_name + page_slug
+            if debug: pywikibot.stdout('         Changed page_slug to {} on account of "/".'.format(page_slug))
       
          # If this is a relative "../" link, find the parent page, set ourselves to that page,
          # then remove the relative portion of the link. Note that this is only performed once,
          # so if there's multiple steps back ("../../"), we're out of luck.
-         if link_text.startswith('../'):
+         if page_slug.startswith('../'):
             last_slash = page_name.rfind('/')
             page_name2 = page_name[0:last_slash]
             if debug: pywikibot.stdout('         Changed page_name to {} on account of "../".'.format(page_name2))
-            link_text = link_text[3:len(link_text)]
-            if debug: pywikibot.stdout('         Changed link_text to {} on account of "../".'.format(link_text))
+            page_slug = page_slug[3:len(page_slug)]
+            if debug: pywikibot.stdout('         Changed page_slug to {} on account of "../".'.format(page_slug))
             # If this is now going to be a bare section link for the parent page, don't add a
             # slash, otherwise do because we are drilling down to another subpage
-            if link_text.startswith('#'):
-               link_text = page_name2 + link_text
+            if page_slug.startswith('#'):
+               page_slug = page_name2 + page_slug
             else:
-               link_text = page_name2 + '/' + link_text
+               page_slug = page_name2 + '/' + page_slug
          
          # If this is a bare section link, build URL based on this page
-         if link_text.startswith('#'):
+         if page_slug.startswith('#'):
             iw_url = onigalore_url + page_name2
             iw_found += 1
-            if debug: pywikibot.stdout('         Found link to this very page, {}.'.format(link_text))
+            if debug: pywikibot.stdout('         Found link to this very page, {}.'.format(page_slug))
             found_iw_match = True
-            link_text = page_name2 + link_text
+            page_slug = page_name2 + page_slug
       
          # If there's no ":" in the link (before the section link, where a colon would just be
          # part of the text) then it's a Main namespace article; proceed with building URL
          if found_iw_match == False:
-            if not re.search(":.*#", link_text):
-               iw_url = onigalore_url + link_text
+            if not re.search(":.*#", page_slug):
+               iw_url = onigalore_url + page_slug
                iw_found += 1
                if debug: pywikibot.stdout('         Link is to a Main namespace page.')
@@ -186,6 +297,6 @@
          if found_iw_match == False:
             for prefix in intrawiki_prefixes:
-               if prefix + ":" in link_text:
-                  iw_url = onigalore_url + link_text
+               if prefix + ":" in page_slug:
+                  iw_url = onigalore_url + page_slug
                   if debug: pywikibot.stdout('         Identified namespace {}.'.format(prefix))
                   iw_found += 1
@@ -195,65 +306,51 @@
          # If we still haven't turned this match into a URL, something's gone wrong
          if (found_iw_match == False) or (iw_url == ""):
-            if not name_printed and not debug:
-               pywikibot.stdout('From page "{}":'.format(page_name))
-               name_printed = 1
-            pywikibot.stdout('   ERROR: Couldn\'t figure out link {}.'.format(link_text))
+            possibly_print(page_name)
+            pywikibot.stdout('   ERROR: Couldn\'t figure out link {}.'.format(page_slug))
             continue
 
          # Test the URL
          iw_url = iw_url.replace(' ', '_')
-         if debug: pywikibot.stdout('         Reading page at {}...'.format(iw_url))
-         response = fetch(iw_url)
-
-         # Redirects are followed automatically by fetch() and treated as "200"s; the way we can
-         # tell that a redirect occurred is by checking fetch's history
-         if response.history != []:
-            if not name_printed and not debug:
-               pywikibot.stdout('From page "{}":'.format(page_name))
-               name_printed = 1
-            pywikibot.stdout('   ADVICE: Got redirection code ({0}) on URL "{1}". You should check the link manually.'.format(response.history[0], iw_url))
-            advice_issued += 1
-         elif response.status_code != 200:
-            if not name_printed and not debug:
-               pywikibot.stdout('From page "{}":'.format(page_name))
-               name_printed = 1
-            pywikibot.stdout('   ERROR: Got response code {0} on URL {1}. The target page may not exist.'.format(response.status_code, iw_url))
-            errors_issued += 1
-         else:
-            # Isolate section link
-            pre_section, section_name = link_text.split('#', 1)
-            if debug: pywikibot.stdout('         Searching for section link {} on page.'.format(section_name))
-         
-            # Convert slash character to the dot-notation hex encoding that MediaWiki uses
-            section_name = section_name.replace('/', '.2F')
-         
-            # Read linked page to see if it really has this anchor link
-            soup = BeautifulSoup(response.text, 'html.parser')
-            found_section = False
-            for span_tag in soup.findAll('span'):
-               span_name = span_tag.get('id', None)
-               if span_name == section_name:
-                  if debug: pywikibot.stdout('         Found section!')
-                  found_section = True
-                  break
-            if found_section == False:
-               if not name_printed and not debug:
-                  pywikibot.stdout('From page "{}":'.format(page_name))
-                  name_printed = 1
-               pywikibot.stdout('   ERROR: Could not find section {0} on page {1}!'.format(section_name, pre_section))
-               errors_issued += 1
-
-def main(*args):
-   global debug
+         if debug: pywikibot.stdout('         Reading page at {}….'.format(iw_url))
+         test_intrawiki_link(iw_url, page_name, page_slug)
+
+# Print a wrap-up message
+def print_summary():
    global pages_checked
    global iw_found
    global advice_issued
    global errors_issued
+   
+   page_str = "pages"
+   if pages_checked == 1:
+      page_str = "page"
+
+   link_str = "links"
+   if iw_found == 1:
+      link_str = "link"
+
+   pywikibot.stdout('Checked {0} {1} and found {2} intrawiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
+   pywikibot.stdout('While attempting to follow section links….')
+
+   if advice_issued == 0:
+      pywikibot.stdout('   No advice on potential problems was issued.')
+   elif advice_issued == 1:
+      pywikibot.stdout('   1 piece of advice on a potential problem was issued.')
+   else:
+      pywikibot.stdout('   {} pieces of advice on potential problems were issued.'.format(advice_issued))
+
+   error_str = "errors were"
+   if errors_issued == 1:
+      error_str = "error was"
+   pywikibot.stdout('   {0} {1} encountered.'.format(errors_issued, error_str))
+
+# Main function
+def main(*args):
+   global debug
    search_cat = ''
    search_page = ''
 
+   # Process arguments
    local_args = pywikibot.handle_args(args)
-   genFactory = pagegenerators.GeneratorFactory()
-
    for arg in local_args:
       if arg.startswith('-cat:'):
@@ -272,4 +369,5 @@
    #pywikibot.stdout(format(dir(page)))
 
+   # Check specified page or loop through specified category and check all pages
    if search_cat != '':
       cat_obj = pywikibot.Category(site, search_cat)
@@ -283,26 +381,6 @@
       scan_for_intrawiki_links(page.text, page.title())
 
-   page_str = "pages"
-   if pages_checked == 1:
-      page_str = "page"
-
-   link_str = "links"
-   if iw_found == 1:
-      link_str = "link"
-
-   pywikibot.stdout('Checked {0} {1} and found {2} intrawiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
-   pywikibot.stdout('While attempting to follow section links...')
-
-   if advice_issued == 0:
-      pywikibot.stdout('   No advice on potential problems was issued.')
-   elif advice_issued == 1:
-      pywikibot.stdout('   1 piece of advice on a potential problem was issued.')
-   else:
-      pywikibot.stdout('   {} pieces of advice on potential problems were issued.'.format(advice_issued))
-
-   error_str = "errors were"
-   if errors_issued == 1:
-      error_str = "error was"
-   pywikibot.stdout('   {0} {1} encountered.'.format(errors_issued, error_str))
+   # Print the results
+   print_summary()
 
 if __name__ == '__main__':
