# Check Interwiki Links
# by iritscen@yahoo.com
# Looks at each link on a page (or in all the pages in a category) that uses a registered
# interwiki prefix, loads the linked page, and verifies that it exists and that any section
# link, if present, is valid as well. The output uses the word "ERROR" whenever a link
# cannot be validated.
# Recommended viewing width:
# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|
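#
# Usage examples (assuming a standard Pywikibot setup; the script file name
# "check_interwiki_links" is an assumption, adjust to match where this file lives):
#   python pwb.py check_interwiki_links -page:"Main Page"
#   python pwb.py check_interwiki_links -cat:"Some Category"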
import re
import pywikibot
import requests # only needed by the commented-out dir() debugging lines in main()
from pywikibot import pagegenerators
from pywikibot.comms.http import fetch
from bs4 import BeautifulSoup
# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')
interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
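# Sanity check: the parallel arrays above must stay the same length, or prefixes
# will silently be paired with the wrong URLs
assert len(interwiki_prefixes) == len(interwiki_urls)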
pages_checked = 0
iw_found = 0
errors_issued = 0
# Searches the given page text for interwiki links
def scan_for_iw_links(page_text):
global pages_checked
global iw_found
global errors_issued
pages_checked = pages_checked + 1
    for cur, prefix in enumerate(interwiki_prefixes):
# Isolate strings that start with "[[prefix:" and end with "|" or "]"
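        # e.g. in "[[wp:Some Page|label]]", the match is "[[wp:Some Page|"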
        iw_link = r"\[\[" + prefix + r":[^|\]]*(\||\])"
for match in re.finditer(iw_link, page_text):
# Extract just the page title from this regex match
s = match.start() + 2 + len(prefix) + 1
e = match.end() - 1
# Sometimes we used a space char. instead of a '_', so fix that before querying
page_title = page_text[s:e].replace(' ', '_')
# Use only spaces for title when printing it
page_title_human = page_title.replace('_', ' ')
pywikibot.stdout(' Validating {0} link "{1}"'.format(prefix, page_title_human))
iw_found = iw_found + 1
# Construct full URL for the particular wiki
iw_url = interwiki_urls[cur] + page_title
# Adjust URL if this is a foreign-language WP link
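            # e.g. "fr:Page" under the "wp" prefix should resolve to
            # https://fr.wikipedia.org/wiki/Page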
if re.match("^[a-zA-Z]{2}:", page_title):
lang_code = page_title[0:2] + "."
# "wp:" is the Wikipedia: namespace, not a language
if lang_code != "wp." and lang_code != "WP.":
iw_url = iw_url.replace('en.', lang_code)
iw_url = iw_url.replace(page_title[0:3], '')
# Test the URL
response = fetch(iw_url)
# One way we tell that a redirect occurred is by checking the history
if response.history != []:
                pywikibot.stdout(' ERROR: Got redirection code ({0}) on URL "{1}".'.format(response.history[0].status_code, iw_url))
errors_issued = errors_issued + 1
elif response.status_code != 200:
pywikibot.stdout(' ERROR: Got response code {0} on URL "{1}".'.format(response.status_code, iw_url))
errors_issued = errors_issued + 1
# The usual way that a redirect occurs is that MediaWiki redirects us sneakily
# using JavaScript, while returning code OK 200 as if the link was correct; we
# must detect this from the page source
            elif 'Redirected from <a' in response.text:
                # Isolate the canonical page name, which is the text inside the <a>
                # element of the "Redirected from" notice
                canonical_name = response.text.split('Redirected from <a', 1)[1]
                canonical_name = canonical_name[canonical_name.find('>') + 1:]
                tag_end = canonical_name.find('</a>')
if tag_end == -1:
pywikibot.stdout(' ERROR: This is a redirect page (but I could not isolate the correct page name).')
else:
canonical_name = canonical_name[:tag_end]
if len(canonical_name) > 100:
# Certain things can cause the trim to fail; here we avoid slamming
# the output with massive page source from a failed trim
pywikibot.stdout(' ERROR: This is a redirect to "{}" (string trimmed to 100 chars due to excessive length).'.format(canonical_name[:100]))
else:
canonical_name = canonical_name.replace('_', ' ')
pywikibot.stdout(' ERROR: This is a redirect to "{}".'.format(canonical_name))
errors_issued = errors_issued + 1
elif '#' in page_title:
# Isolate section link
page_name, anchor_name = page_title.split('#')
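                # e.g. "Some_Page#Some_section" splits into ("Some_Page", "Some_section")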
# Convert dot-notation hex entities to proper characters
anchor_name = anchor_name.replace('.22', '"')
anchor_name = anchor_name.replace('.27', '\'')
anchor_name = anchor_name.replace('.28', '(')
anchor_name = anchor_name.replace('.29', ')')
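                # The lines above cover the most common escapes; a more general decoding
                # (a sketch, assuming every escape is a ".XX" hex pair as in MediaWiki's
                # legacy anchor encoding) would be:
                # anchor_name = re.sub(r'\.([0-9A-F]{2})', lambda m: chr(int(m.group(1), 16)), anchor_name)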
# Read linked page to see if it really has this anchor link
soup = BeautifulSoup(response.text, 'html.parser')
found_section = False
                for span_tag in soup.find_all('span'):
span_name = span_tag.get('id', None)
if span_name == anchor_name:
found_section = True
break
                if not found_section:
pywikibot.stdout(' ERROR: Could not find section {0} on page {1}.'.format(anchor_name, page_name))
errors_issued = errors_issued + 1
def main(*args):
cat_name = ''
page_name = ''
local_args = pywikibot.handle_args(args)
for arg in local_args:
if arg.startswith('-cat:'):
cat_name = arg[5:]
elif arg.startswith('-page:'):
page_name = arg[6:]
site = pywikibot.Site()
#pywikibot.stdout('The members of the requests.models.Response class are:')
#pywikibot.stdout(format(dir(requests.models.Response)))
if cat_name != '':
cat_obj = pywikibot.Category(site, cat_name)
generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
for page in pagegenerators.PreloadingGenerator(generator, 100):
pywikibot.stdout('Checking page "{}"'.format(page.title()))
scan_for_iw_links(page.text)
elif page_name != '':
page = pywikibot.Page(site, page_name)
pywikibot.stdout('Checking page "{}"'.format(page.title()))
scan_for_iw_links(page.text)
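    else:
        # Neither -cat: nor -page: was given, so there is nothing to check
        pywikibot.stdout('No -cat: or -page: argument given; nothing to check.')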
global pages_checked
global iw_found
global errors_issued
page_str = "pages"
if pages_checked == 1:
page_str = "page"
link_str = "links"
if iw_found == 1:
link_str = "link"
pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))
error_str = "errors were"
if errors_issued == 1:
error_str = "error was"
pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))
if __name__ == '__main__':
main()