Index: /ValBot/Python/check_interwiki_links.py
===================================================================
--- /ValBot/Python/check_interwiki_links.py	(revision 1173)
+++ /ValBot/Python/check_interwiki_links.py	(revision 1174)
@@ -14,5 +14,5 @@
 import pywikibot
 import re
-import requests
+import requests # for listing members with dir()
 
 from pywikibot.bot import QuitKeyboardInterrupt
@@ -51,8 +51,11 @@
             page_title = page_text[s:e].replace(' ', '_')
 
+            # Use only spaces in the title when printing it
+            page_title_human = page_title.replace('_', ' ')
+            pywikibot.stdout('   Validating {0} link "{1}"'.format(prefix, page_title_human))
+            iw_found = iw_found + 1
+
             # Construct full URL for the particular wiki
             iw_url = interwiki_urls[cur] + page_title
-            pywikibot.stdout('   Validating {0} link "{1}"'.format(prefix, page_title))
-            iw_found = iw_found + 1
 
             # Adjust URL if this is a foreign-language WP link
@@ -65,9 +68,7 @@
 
             # Test the URL
-            #pywikibot.stdout('   Testing URL "{}"'.format(iw_url))
             response = fetch(iw_url)
 
-            # Redirects are followed automatically by fetch() and treated as "200"s, so the
-            # way we tell that a redirect occurred is by checking the history
+            # One way we tell that a redirect occurred is by checking the history
             if response.history != []:
                 pywikibot.stdout('   ERROR: Got redirection code ({0}) on URL "{1}".'.format(response.history[0], iw_url))
@@ -75,4 +76,26 @@
             elif response.status_code != 200:
                 pywikibot.stdout('   ERROR: Got response code {0} on URL "{1}".'.format(response.status_code, iw_url))
+                errors_issued = errors_issued + 1
+            # The usual way that a redirect occurs is that MediaWiki redirects us sneakily
+            # using JavaScript, while returning a 200 OK code as if the link were correct; we
+            # must detect this from the page source
+            elif 'Redirected from <a' in response.text:
+                # Extract link from this source which contains name of redirected-to page:
+                # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
+                canonical_name = response.text.split('<link rel="canonical" href="')[-1]
+                prefix_length = len(interwiki_urls[cur])
+                canonical_name = canonical_name[prefix_length:]
+                tag_end = canonical_name.find('"/>')
+                if tag_end == -1:
+                   pywikibot.stdout('   ERROR: This is a redirect page (but I could not isolate the correct page name).')
+                else:
+                   canonical_name = canonical_name[:tag_end]
+                   if len(canonical_name) > 100:
+                      # Certain things can cause the trim to fail; here we avoid slamming
+                      # the output with massive page source from a failed trim
+                      pywikibot.stdout('   ERROR: This is a redirect to "{}" (string trimmed to 100 chars due to excessive length).'.format(canonical_name[:100]))
+                   else:
+                      canonical_name = canonical_name.replace('_', ' ')
+                      pywikibot.stdout('   ERROR: This is a redirect to "{}".'.format(canonical_name))
                 errors_issued = errors_issued + 1
             elif '#' in page_title:
@@ -92,5 +115,4 @@
                     span_name = span_tag.get('id', None)
                     if span_name == anchor_name:
-                        #pywikibot.stdout('Found section!')
                         found_section = True
                         break
