source: ValBot/Python/check_interwiki_links.py@ 1211

Last change on this file since 1211 was 1207, checked in by iritscen, 5 weeks ago

ValBot: Added throttle to check_interwiki_links.py to avoid Wikipedia kicking us out with error 429.

File size: 13.8 KB
# Check Interwiki Links
# by iritscen@yahoo.com
# Looks at each link on a page (or all the pages in a category) which uses a registered interwiki prefix and loads the linked page, verifying that it exists and that
# any section link, if present, is valid as well. The output will use the word "ERROR" when it cannot validate the interwiki link.
# Recommended viewing width:
# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----|
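#
# Example invocations (a sketch; assumes a working pywikibot user-config for the wiki; the names below are illustrative):
#   python check_interwiki_links.py -page:"Easter eggs"
#   python check_interwiki_links.py -cat:"Some category" -dbg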

import pywikibot
import re
import requests # for listing members with dir() when debugging
import time

from bs4 import BeautifulSoup
from pywikibot import pagegenerators
from pywikibot.comms.http import fetch

class IWLink:
    def __init__(self, iw_prefix, prefix_url, full_url, page_name, page_name_only, page_slug, hosting_page, curl_response):
        self.iw_prefix = iw_prefix # e.g. "wp" as in [[wp:Marathon (series)#Rampancy]]
        self.prefix_url = prefix_url # e.g. "https://en.wikipedia.org/wiki/"
        self.full_url = full_url # e.g. "https://en.wikipedia.org/wiki/Marathon_(series)#Rampancy"
        self.page_name = page_name # "Marathon (series)#Rampancy"
        self.page_name_only = page_name_only # "Marathon (series)"
        self.page_slug = page_slug # "Marathon_(series)#Rampancy"
        self.hosting_page = hosting_page # "Easter eggs"; page where the link was found
        self.curl_response = curl_response # an instance of the Response class defined in the Requests library

# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')

interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=', 'http://www.google.com/search?q=cache:', 'https://commons.wikimedia.org/wiki/', 'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=', 'http://www.google.com/search?q=', 'https://meta.wikimedia.org/wiki/', 'https://www.mediawiki.org/wiki/', 'https://en.wikibooks.org/wiki/', 'https://www.wikidata.org/wiki/', 'https://foundation.wikimedia.org/wiki/', 'https://en.wikinews.org/wiki/', 'https://en.wikipedia.org/wiki/', 'https://en.wikiquote.org/wiki/', 'https://wikisource.org/wiki/', 'https://species.wikimedia.org/wiki/', 'https://en.wikiversity.org/wiki/', 'https://en.wikivoyage.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wiktionary.org/wiki/', 'https://en.wikipedia.org/wiki/')

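# The tuples above are index-aligned parallel arrays: interwiki_prefixes[i] is resolved using
# interwiki_urls[i], so both must stay the same length and in the same order whenever the wiki's
# interwiki table changes.
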
# Initialize globals
debug = 0
pages_checked = 0
iw_found = 0
errors_issued = 0
unintended_redirects_found = 0
name_printed = 0
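# Throttle settings: wait request_delay seconds before each fetch; after a 429 response, multiply the
# delay by backoff_factor and retry, giving up after max_retries retries (with these defaults, the
# retry delays are 3.0s, 6.0s and 12.0s)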
request_delay = 1.5
max_retries = 3
backoff_factor = 2

# Prints the name of a page on which something occurred, if it has not been printed before
def possibly_print(the_link):
    global debug
    global name_printed

    if not name_printed and not debug:
        pywikibot.stdout('')
        pywikibot.stdout('From page "{}":'.format(the_link.hosting_page))
        name_printed = 1

58# Search a page for the section specified in the link
[1196]59def find_section(the_link, print_result):
[1185]60 global errors_issued
61
62 # Isolate section link
[1198]63 _, anchor_name = the_link.page_slug.split('#')
[1185]64
65 # Convert dot-notation hex entities to proper characters
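    # (e.g. a heading of 'He said "hi"' appears in legacy MediaWiki anchors as He_said_.22hi.22)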
    replacements = [(r'\.22', '"'), (r'\.27', "'"), (r'\.28', '('), (r'\.29', ')')]
    for pattern, replacement in replacements:
        anchor_name = re.sub(pattern, replacement, anchor_name)

    # Read linked page to see if it really has this anchor link
    soup = BeautifulSoup(the_link.curl_response.text, 'html.parser')
    tags_to_search = ['span', 'div', 'h2', 'h3', 'h4']
    found_section = False
    for tag_name in tags_to_search:
        for the_tag in soup.find_all(tag_name):
            if the_tag.get('id') == anchor_name:
                found_section = True
                break
        if found_section:
            break

    # Tell user what we found
    if not found_section:
        possibly_print(the_link)
        pywikibot.stdout(' ERROR: Could not find section "{0}" on {1} page "{2}".'.format(anchor_name, the_link.iw_prefix, the_link.page_name))
        errors_issued = errors_issued + 1
    elif print_result:
        pywikibot.stdout(' The section "{0}" was found on {1} page "{2}".'.format(anchor_name, the_link.iw_prefix, the_link.page_name))

# For a link that redirected us to another page, extract the name of the target page from the target page's source
def find_canonical_link(the_link):
    global errors_issued

    # Extract link from this markup which contains name of redirected-to page:
    # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
    canonical_name = the_link.curl_response.text.split('<link rel="canonical" href="')[-1]
    prefix_length = len(the_link.prefix_url)
    canonical_name = canonical_name[prefix_length:]
    tag_end = canonical_name.find('"') # look only for the closing quote, since the tag may end in either '"/>' or '">'

    if tag_end == -1:
        pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect page, but this script could not isolate the target page name.'.format(the_link.iw_prefix, the_link.page_slug))
        errors_issued = errors_issued + 1
    else:
        canonical_name = canonical_name[:tag_end]
        if len(canonical_name) > 100:
            # Certain things can cause the trim to fail; report an error and avoid slamming the output with massive page source from a failed trim
            pywikibot.stdout(' ERROR: The {0} link "{1}" is a redirect to "{2}…" (string overflow).'.format(the_link.iw_prefix, the_link.page_slug, canonical_name[:100]))
            errors_issued = errors_issued + 1
        else:
            the_link.page_name = canonical_name.replace('_', ' ')
            if '#' in the_link.page_slug:
                the_link.page_name_only, _ = the_link.page_slug.split('#', 1)
                pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}", which is a valid page. Checking for section on that page….'.format(the_link.iw_prefix, the_link.page_name_only, the_link.page_name))
                find_section(the_link, True)
            else:
                pywikibot.stdout(' The {0} link "{1}" is a redirect to "{2}", which is a valid page.'.format(the_link.iw_prefix, the_link.page_slug, the_link.page_name))

# Test an interwiki link and look for a section link if applicable
def test_interwiki_link(the_link):
    global errors_issued
    global unintended_redirects_found

    # We have to carefully throttle requests because otherwise we will get hit with a 429: Too Many Requests
    attempt = 0
    delay = request_delay
    while True:
        time.sleep(delay)

        the_link.curl_response = fetch(the_link.full_url)

        if the_link.curl_response.status_code != 429:
            break

        attempt += 1
        if attempt > max_retries:
            pywikibot.stdout(f' ERROR: Maximum retries after error 429 exceeded for "{the_link.page_slug}". Aborting script.')
            raise SystemExit(1)

        # Increase the delay between requests if we got the error
        delay *= backoff_factor
        pywikibot.stdout(f' WARNING: Received error 429 for "{the_link.page_slug}". Retrying in {delay:.1f}s...')

    # One way we tell that a redirect occurred is by checking fetch's history, as it automatically follows redirects. This will catch formal redirects which come from
    # pages such as Special:PermanentLink.
    if the_link.curl_response.history != []:
        possibly_print(the_link)

        # If linked page is in all caps, e.g. WP:BEANS, it's likely a deliberate use of a redirect
        if the_link.page_slug.startswith('WP:') and the_link.page_slug == the_link.page_slug.upper():
            pywikibot.stdout(' Got redirection code "{0}" for {1} link "{2}". This appears to be a deliberate use of a Wikipedia shortcut. Checking the target page….'.format(the_link.curl_response.history[0].status_code, the_link.iw_prefix, the_link.page_slug))
            find_canonical_link(the_link)
        else:
            permalink1 = 'Special:PermanentLink/'.lower()
            permalink2 = 'Special:Permalink/'.lower()
            page_slug_lower = the_link.page_slug.lower()
            if page_slug_lower.startswith(permalink1) or page_slug_lower.startswith(permalink2):
                pywikibot.stdout(' Got redirection code "{0}" for {1} permanent revision link "{2}". Checking the target page….'.format(the_link.curl_response.history[0].status_code, the_link.iw_prefix, the_link.page_slug))
                find_canonical_link(the_link)
            else:
                pywikibot.stdout(' ERROR: Unrecognized type of redirection (code "{0}") for {1} link "{2}". You should check the link manually.'.format(the_link.curl_response.history[0].status_code, the_link.iw_prefix, the_link.page_slug))
                errors_issued = errors_issued + 1
    elif the_link.curl_response.status_code != 200:
        possibly_print(the_link)
        pywikibot.stdout(' ERROR: Got response code {0} for {1} link "{2}". The page may not exist.'.format(the_link.curl_response.status_code, the_link.iw_prefix, the_link.page_slug))
        errors_issued = errors_issued + 1
    # However the usual way that a redirect occurs is that MediaWiki redirects us sneakily, serving the target page's content while returning code OK 200 as if the link
    # was correct; this happens when a redirect page is accessed. We must detect these soft redirects by looking at the page source to find the redirect note inserted
    # at the top of the page for the reader.
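    # (the inserted note looks roughly like: (Redirected from <a href="/w/index.php?title=Foo&redirect=no" title="Foo">Foo</a>))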
    elif 'Redirected from <a' in the_link.curl_response.text:
        unintended_redirects_found = unintended_redirects_found + 1
        possibly_print(the_link)
        pywikibot.stdout(' WARNING: Got silently redirected by {0} link "{1}". Checking the target page….'.format(the_link.iw_prefix, the_link.page_slug))
        find_canonical_link(the_link) # calls find_section() at end
    elif '#' in the_link.page_slug:
        find_section(the_link, False)

# Searches the given page text for interwiki links
def scan_for_interwiki_links(page_text, page_name):
    global debug
    global pages_checked
    global iw_found
    global name_printed
    pages_checked = pages_checked + 1
    cur_prefix = 0
    name_printed = 0

    for prefix in interwiki_prefixes:
        # Isolate strings that start with "[[prefix:" and end with "|" or "]"
        iw_link = r"\[\[" + prefix + r":[^|\]]*(\||\])"
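        # (e.g. with prefix "wp", "[[wp:Foo]]" matches as "[[wp:Foo]" and "[[wp:Foo|bar]]" matches as "[[wp:Foo|")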
        for match in re.finditer(iw_link, page_text):
            the_link = IWLink(prefix, interwiki_urls[cur_prefix], "", "", "", "", page_name, "")

            # Extract just the page title from this regex match
            s = match.start() + 2 + len(the_link.iw_prefix) + 1
            e = match.end() - 1

            # Use underscores in the slug used to construct the URL, but retain spaces for the printable name
            the_link.page_slug = page_text[s:e].replace(' ', '_')
            the_link.page_name = page_text[s:e]
            if debug: pywikibot.stdout(' Validating {0} link "{1}"'.format(the_link.iw_prefix, the_link.page_name))
            iw_found = iw_found + 1

            # Construct full URL for the particular wiki
            the_link.full_url = the_link.prefix_url + the_link.page_slug

            # Adjust URL if this is a foreign-language WP link
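            # (e.g. [[wp:fr:Paris]] becomes https://fr.wikipedia.org/wiki/Paris after the two replacements below)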
            if re.match("^[a-zA-Z]{2}:", the_link.page_slug):
                lang_code = the_link.page_slug[0:2] + "."
                # "wp:" is the Wikipedia: namespace, not a language
                if lang_code != "wp." and lang_code != "WP.":
                    the_link.full_url = the_link.full_url.replace('en.', lang_code)
                    the_link.full_url = the_link.full_url.replace(the_link.page_slug[0:3], '')

            # Test the URL
            test_interwiki_link(the_link)
        cur_prefix = cur_prefix + 1

# Print a wrap-up message
def print_summary():
    global pages_checked
    global iw_found
    global errors_issued
    global unintended_redirects_found

    page_str = "pages"
    if pages_checked == 1:
        page_str = "page"

    link_str = "links"
    if iw_found == 1:
        link_str = "link"

    pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))

    error_str = "errors were"
    if errors_issued == 1:
        error_str = "error was"

    pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))

    warning_str = "likely-unintended redirects were"
    if unintended_redirects_found == 1:
        warning_str = "likely-unintended redirect was"

    pywikibot.stdout('{0} {1} encountered in validating these links.'.format(unintended_redirects_found, warning_str))

# Main function
def main(*args):
    global debug
    search_cat = ''
    search_page = ''

    # Process arguments
    local_args = pywikibot.handle_args(args)
    for arg in local_args:
        if arg.startswith('-cat:'):
            search_cat = arg[5:]
        elif arg.startswith('-page:'):
            search_page = arg[6:]
        elif arg == '-dbg':
            debug = 1
        else:
            pywikibot.stdout('Unknown argument "{}". Exiting.'.format(arg))
            return

    #pywikibot.stdout('The members of the requests.models.Response class are:')
    #pywikibot.stdout(format(dir(requests.models.Response)))
    #return

    # Check specified page or loop through specified category and check all pages
    site = pywikibot.Site()
    if search_cat != '':
        cat_obj = pywikibot.Category(site, search_cat)
        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
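        # PreloadingGenerator fetches page text in batches (here, 100 pages at a time) to reduce API round-trips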
        for page in pagegenerators.PreloadingGenerator(generator, 100):
            if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
            scan_for_interwiki_links(page.text, page.title())
    elif search_page != '':
        page = pywikibot.Page(site, search_page)
        if debug: pywikibot.stdout('Checking page "{}"'.format(page.title()))
        scan_for_interwiki_links(page.text, page.title())

    # Print the results
    print_summary()

if __name__ == '__main__':
    main()