# Check Interwiki Links
# by iritscen@yahoo.com
# Looks at each link on a page (or on all the pages in a category) which uses a registered
# interwiki prefix and loads the linked page, verifying that it exists and that any section
# link, if present, is valid as well. The output will use the word "ERROR" when it cannot
# validate an interwiki link.
# Recommended viewing width:
# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|
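#
# Usage (a sketch; assumes the Pywikibot framework is set up and this script is saved
# under the illustrative name check_interwiki_links.py in Pywikibot's scripts/ folder):
#   python pwb.py check_interwiki_links -cat:"Category name"
#   python pwb.py check_interwiki_links -page:"Page name" -dbg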

import os

from urllib.parse import urljoin

import pywikibot
import re
import requests # for listing members with dir()

from pywikibot.bot import QuitKeyboardInterrupt
from pywikibot import pagegenerators
from pywikibot.tools.formatter import color_format
from pywikibot.comms.http import fetch
from pywikibot.specialbots import UploadRobot
from bs4 import BeautifulSoup

# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')

interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')
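# For example, 'wp' and 'https://en.wikipedia.org/wiki/' share the final index of these
# two arrays, so a link such as "[[wp:Main Page]]" would resolve to
# https://en.wikipedia.org/wiki/Main_Page ("Main Page" being just an illustrative title).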

# Initialize globals
debug = 0
pages_checked = 0
iw_found = 0
errors_issued = 0

# Searches the given page text for interwiki links
def scan_for_interwiki_links(page_text, page_name):
    global debug
    global pages_checked
    global iw_found
    global errors_issued
    pages_checked = pages_checked + 1
    cur = 0
    name_printed = 0

    for prefix in interwiki_prefixes:
        # Isolate strings that start with "[[prefix:" and end with "|" or "]"
        iw_link = r"\[\[" + prefix + r":[^|\]]*(\||\])"
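        # (e.g. for prefix "wp", this pattern matches "[[wp:Some page]" or "[[wp:Some page|",
        # where "Some page" stands for any title without "|" or "]")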
        for match in re.finditer(iw_link, page_text):
            # Extract just the page title from this regex match
            s = match.start() + 2 + len(prefix) + 1
            e = match.end() - 1
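            # (s skips the leading "[[", the prefix and its ":"; e trims the trailing
            # "|" or "]" that the pattern captured)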

            # Sometimes we used a space char. instead of a '_', so fix that before querying
            page_title = page_text[s:e].replace(' ', '_')

            # Use only spaces for title when printing it
            page_title_human = page_title.replace('_', ' ')
            if debug: pywikibot.stdout(' Validating {0} link "{1}"'.format(prefix, page_title_human))
            iw_found = iw_found + 1

            # Construct full URL for the particular wiki
            iw_url = interwiki_urls[cur] + page_title

            # Adjust URL if this is a foreign-language WP link
            if re.match("^[a-zA-Z]{2}:", page_title):
                lang_code = page_title[0:2] + "."
                # "wp:" is the Wikipedia: namespace, not a language
                if lang_code != "wp." and lang_code != "WP.":
                    iw_url = iw_url.replace('en.', lang_code)
                    iw_url = iw_url.replace(page_title[0:3], '')
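                    # (e.g. a title of "fr:Paris" turns the URL
                    # https://en.wikipedia.org/wiki/fr:Paris into https://fr.wikipedia.org/wiki/Paris)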

            # Test the URL
            response = fetch(iw_url)
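            # (fetch() is assumed to return a requests-style Response here, as it does in
            # recent Pywikibot versions; .history, .status_code and .text are relied on below)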

            # One way we tell that a redirect occurred is by checking the history
            if response.history != []:
                if not name_printed and not debug:
                    pywikibot.stdout('From page "{}":'.format(page_name))
                    name_printed = 1
                if page_title.startswith('WP:') and page_title == page_title.upper():
                    pywikibot.stdout(' ERROR: Got redirection code ({0}) for {1} link "{2}", but this appears to be a deliberate use of a Wikipedia shortcut. You should check the link manually.'.format(response.history[0], prefix, page_title))
                else:
                    pywikibot.stdout(' ERROR: Got redirection code ({0}) for {1} link "{2}". You should check the link manually.'.format(response.history[0], prefix, page_title))
                errors_issued = errors_issued + 1
            elif response.status_code != 200:
                if not name_printed and not debug:
                    pywikibot.stdout('From page "{}":'.format(page_name))
                    name_printed = 1
                pywikibot.stdout(' ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(response.status_code, prefix, page_title))
                errors_issued = errors_issued + 1
            # The usual way that a redirect occurs is that MediaWiki silently serves the
            # target page while returning code OK 200 as if the link were correct, noting
            # the redirect only in the page body; we must detect this from the page source
            elif 'Redirected from <a' in response.text:
                if not name_printed and not debug:
                    pywikibot.stdout('From page "{}":'.format(page_name))
                    name_printed = 1
                # Extract the link in this source which contains the name of the
                # redirected-to page:
                # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
                canonical_name = response.text.split('<link rel="canonical" href="')[-1]
                prefix_length = len(interwiki_urls[cur])
                canonical_name = canonical_name[prefix_length:]
                tag_end = canonical_name.find('"/>')
                if tag_end == -1:
                    pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(prefix, page_title))
                else:
                    canonical_name = canonical_name[:tag_end]
                    if len(canonical_name) > 100:
                        # Certain things can cause the trim to fail; here we avoid slamming
                        # the output with massive page source from a failed trim
                        pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}…" (string trimmed to 100 chars).'.format(prefix, page_title, canonical_name[:100]))
                    else:
                        canonical_name = canonical_name.replace('_', ' ')
                        pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}".'.format(prefix, page_title, canonical_name))
                errors_issued = errors_issued + 1
            elif '#' in page_title:
                # Isolate section link
                target_page_name, anchor_name = page_title.split('#')

                # Convert dot-notation hex entities to proper characters
                anchor_name = anchor_name.replace('.22', '"')
                anchor_name = anchor_name.replace('.27', '\'')
                anchor_name = anchor_name.replace('.28', '(')
                anchor_name = anchor_name.replace('.29', ')')
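                # ('.22', '.27', '.28' and '.29' are the hex character codes for '"', "'",
                # '(' and ')', so an anchor written as A.22B.22 is compared as A"B")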

                # Read linked page to see if it really has this anchor link
                soup = BeautifulSoup(response.text, 'html.parser')
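                # (section headings are assumed to be rendered as in MediaWiki's default
                # skin, with an id'd <span> such as <span class="mw-headline" id="..."> for
                # each heading, which is why <span> tags are scanned for the anchor)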
                found_section = False
                for span_tag in soup.findAll('span'):
                    span_name = span_tag.get('id', None)
                    if span_name == anchor_name:
                        found_section = True
                        break
                if not found_section:
                    if not name_printed and not debug:
                        pywikibot.stdout('From page "{}":'.format(page_name))
                        name_printed = 1
                    target_page_name_human = target_page_name.replace('_', ' ')
                    pywikibot.stdout(' ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, prefix, target_page_name_human))
                    errors_issued = errors_issued + 1
        cur = cur + 1

def main(*args):
    global debug
    search_cat = ''
    search_page = ''

    local_args = pywikibot.handle_args(args)
    genFactory = pagegenerators.GeneratorFactory()

    for arg in local_args:
        if arg.startswith('-cat:'):
            search_cat = arg[5:]
        elif arg.startswith('-page:'):
            search_page = arg[6:]
        elif arg == '-dbg':
            debug = 1
        else:
            pywikibot.stdout('Unknown argument "{}".'.format(arg))
            return

    site = pywikibot.Site()

    #pywikibot.stdout('The members of the requests.models.Response class are:')
    #pywikibot.stdout(format(dir(requests.models.Response)))

    if search_cat != '':
        cat_obj = pywikibot.Category(site, search_cat)
        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
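        # (PreloadingGenerator batches the page-text fetches; the second argument, 100,
        # sets how many pages are assumed to be preloaded per batch)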
        for page in pagegenerators.PreloadingGenerator(generator, 100):
            if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
            scan_for_interwiki_links(page.text, page.title())
    elif search_page != '':
        page = pywikibot.Page(site, search_page)
        if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
        scan_for_interwiki_links(page.text, page.title())

    global pages_checked
    global iw_found
    global errors_issued

    page_str = "pages"
    if pages_checked == 1:
        page_str = "page"

    link_str = "links"
    if iw_found == 1:
        link_str = "link"

    pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))

    error_str = "errors were"
    if errors_issued == 1:
        error_str = "error was"

    pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))

if __name__ == '__main__':
    main()