source: ValBot/Python/check_interwiki_links.py@ 1178

Last change on this file since 1178 was 1174, checked in by iritscen, 2 years ago

ValBot: check_interwiki_links.py now properly detects and reports links leading to redirect pages on other wikis.

# Check Interwiki Links
# by iritscen@yahoo.com
# Looks at each link on a page (or in all the pages in a category) which uses a registered
# interwiki prefix and loads the linked page, verifying that it exists and that any section
# link, if present, is valid as well. The output will use the word "ERROR" when it cannot
# validate the interwiki link.
# Recommended viewing width:
# |---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---|

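# Typical invocations (a sketch; assumes the script is run through pywikibot's
# standard pwb.py wrapper, and the page and category names are placeholders):
#   python pwb.py check_interwiki_links.py -page:"Main Page"
#   python pwb.py check_interwiki_links.py -cat:"Articles"
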
import pywikibot
import re
import requests # for listing members with dir()

from pywikibot import pagegenerators
from pywikibot.comms.http import fetch
from bs4 import BeautifulSoup

# Parallel arrays based on https://wiki.oni2.net/Special:Interwiki
interwiki_prefixes = ('acronym', 'cache', 'commons', 'dictionary', 'google', 'metawikimedia', 'mw', 'wikibooks', 'wikidata', 'wikimedia', 'wikinews', 'wikipedia', 'wikiquote', 'wikisource', 'wikispecies', 'wikiversity', 'wikivoyage', 'wikt', 'wiktionary', 'wp')

interwiki_urls = ('http://www.acronymfinder.com/~/search/af.aspx?string=exact&Acronym=',
                  'http://www.google.com/search?q=cache:',
                  'https://commons.wikimedia.org/wiki/',
                  'http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=',
                  'http://www.google.com/search?q=',
                  'https://meta.wikimedia.org/wiki/',
                  'https://www.mediawiki.org/wiki/',
                  'https://en.wikibooks.org/wiki/',
                  'https://www.wikidata.org/wiki/',
                  'https://foundation.wikimedia.org/wiki/',
                  'https://en.wikinews.org/wiki/',
                  'https://en.wikipedia.org/wiki/',
                  'https://en.wikiquote.org/wiki/',
                  'https://wikisource.org/wiki/',
                  'https://species.wikimedia.org/wiki/',
                  'https://en.wikiversity.org/wiki/',
                  'https://en.wikivoyage.org/wiki/',
                  'https://en.wiktionary.org/wiki/',
                  'https://en.wiktionary.org/wiki/',
                  'https://en.wikipedia.org/wiki/')

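# The two tuples above are parallel arrays indexed by the same counter, so they
# must stay the same length; fail fast if an entry is added to only one of them
assert len(interwiki_prefixes) == len(interwiki_urls)
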
pages_checked = 0
iw_found = 0
errors_issued = 0

# Searches the given page text for interwiki links
def scan_for_iw_links(page_text):
    global pages_checked
    global iw_found
    global errors_issued
    pages_checked = pages_checked + 1
    cur = 0

    for prefix in interwiki_prefixes:
        # Isolate strings that start with "[[prefix:" and end with "|" or "]"
        iw_link = r"\[\[" + prefix + r":[^|\]]*(\||\])"
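        # For example, on the text "see [[wp:Mad Max]]" the pattern for prefix
        # "wp" matches the span "[[wp:Mad Max]", and the title "Mad Max" is
        # sliced out of it below ("Mad Max" is just an illustrative title)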
        for match in re.finditer(iw_link, page_text):
            # Extract just the page title from this regex match
            s = match.start() + 2 + len(prefix) + 1
            e = match.end() - 1

            # Sometimes we used a space char. instead of a '_', so fix that before querying
            page_title = page_text[s:e].replace(' ', '_')

            # Use only spaces for title when printing it
            page_title_human = page_title.replace('_', ' ')
            pywikibot.stdout(' Validating {0} link "{1}"'.format(prefix, page_title_human))
            iw_found = iw_found + 1

            # Construct full URL for the particular wiki
            iw_url = interwiki_urls[cur] + page_title

            # Adjust URL if this is a foreign-language WP link
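            # (e.g. a title like "fr:Paris" should end up at https://fr.wikipedia.org/wiki/Paris:
            # the language code replaces "en." in the URL, then the three-character "fr:"
            # prefix is stripped back out of the URL; "fr:Paris" is only an illustration)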
            if re.match("^[a-zA-Z]{2}:", page_title):
                lang_code = page_title[0:2] + "."
                # "wp:" is the Wikipedia: namespace, not a language
                if lang_code != "wp." and lang_code != "WP.":
                    iw_url = iw_url.replace('en.', lang_code)
                    iw_url = iw_url.replace(page_title[0:3], '')

            # Test the URL
            response = fetch(iw_url)

            # One way we tell that a redirect occurred is by checking the history
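            # (the requests library records each intermediate response in
            # response.history when it follows an HTTP 30x redirect, so a
            # non-empty history means the URL did not resolve directly)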
            if response.history:
                pywikibot.stdout(' ERROR: Got redirection code ({0}) on URL "{1}".'.format(response.history[0].status_code, iw_url))
                errors_issued = errors_issued + 1
            elif response.status_code != 200:
                pywikibot.stdout(' ERROR: Got response code {0} on URL "{1}".'.format(response.status_code, iw_url))
                errors_issued = errors_issued + 1
            # The usual way that a redirect occurs is that MediaWiki redirects us sneakily
            # using JavaScript, while returning code OK 200 as if the link was correct; we
            # must detect this from the page source
            elif 'Redirected from <a' in response.text:
                # Extract link from this source which contains name of redirected-to page:
                # <link rel="canonical" href="https://en.wikipedia.org/wiki/Page_name"/>
                canonical_name = response.text.split('<link rel="canonical" href="')[-1]
                prefix_length = len(interwiki_urls[cur])
                canonical_name = canonical_name[prefix_length:]
                tag_end = canonical_name.find('"/>')
                if tag_end == -1:
                    pywikibot.stdout(' ERROR: This is a redirect page (but I could not isolate the correct page name).')
                else:
                    canonical_name = canonical_name[:tag_end]
                    if len(canonical_name) > 100:
                        # Certain things can cause the trim to fail; here we avoid slamming
                        # the output with massive page source from a failed trim
                        pywikibot.stdout(' ERROR: This is a redirect to "{}" (string trimmed to 100 chars due to excessive length).'.format(canonical_name[:100]))
                    else:
                        canonical_name = canonical_name.replace('_', ' ')
                        pywikibot.stdout(' ERROR: This is a redirect to "{}".'.format(canonical_name))
                errors_issued = errors_issued + 1
            elif '#' in page_title:
                # Isolate section link
                page_name, anchor_name = page_title.split('#')

                # Convert dot-notation hex entities to proper characters
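                # (MediaWiki historically encoded non-alphanumeric characters in
                # section anchors as ".XX" hex escapes, so a heading containing a
                # double quote produces ".22" in its anchor; only the four most
                # common escapes are handled here)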
                anchor_name = anchor_name.replace('.22', '"')
                anchor_name = anchor_name.replace('.27', '\'')
                anchor_name = anchor_name.replace('.28', '(')
                anchor_name = anchor_name.replace('.29', ')')

                # Read linked page to see if it really has this anchor link
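                # (MediaWiki typically marks up each section heading with a
                # <span class="mw-headline" id="..."> element, so finding a span
                # whose id matches the anchor means the section exists)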
                soup = BeautifulSoup(response.text, 'html.parser')
                found_section = False
                for span_tag in soup.findAll('span'):
                    span_name = span_tag.get('id', None)
                    if span_name == anchor_name:
                        found_section = True
                        break
                if not found_section:
                    pywikibot.stdout(' ERROR: Could not find section {0} on page {1}.'.format(anchor_name, page_name))
                    errors_issued = errors_issued + 1
        cur = cur + 1

def main(*args):
    cat_name = ''
    page_name = ''

    local_args = pywikibot.handle_args(args)

    for arg in local_args:
        if arg.startswith('-cat:'):
            cat_name = arg[5:]
        elif arg.startswith('-page:'):
            page_name = arg[6:]

    site = pywikibot.Site()

    #pywikibot.stdout('The members of the requests.models.Response class are:')
    #pywikibot.stdout(format(dir(requests.models.Response)))

    if cat_name != '':
        cat_obj = pywikibot.Category(site, cat_name)
        generator = pagegenerators.CategorizedPageGenerator(cat_obj, recurse=True)
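        # PreloadingGenerator pulls page text from the API in batches (100 pages
        # at a time here) rather than one page per request, which speeds up large categories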
        for page in pagegenerators.PreloadingGenerator(generator, 100):
            pywikibot.stdout('Checking page "{}"'.format(page.title()))
            scan_for_iw_links(page.text)
    elif page_name != '':
        page = pywikibot.Page(site, page_name)
        pywikibot.stdout('Checking page "{}"'.format(page.title()))
        scan_for_iw_links(page.text)

    global pages_checked
    global iw_found
    global errors_issued

    page_str = "pages"
    if pages_checked == 1:
        page_str = "page"

    link_str = "links"
    if iw_found == 1:
        link_str = "link"

    pywikibot.stdout('Checked {0} {1} and found {2} interwiki {3}.'.format(pages_checked, page_str, iw_found, link_str))

    error_str = "errors were"
    if errors_issued == 1:
        error_str = "error was"

    pywikibot.stdout('{0} {1} encountered in validating these links.'.format(errors_issued, error_str))

if __name__ == '__main__':
    main()