/usr/share/pyshared/sphinxcontrib/spelling.py is in python-sphinxcontrib.spelling 1.4-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Spelling checker extension for Sphinx.
"""
import __builtin__
import codecs
import collections
import imp
import itertools
import os
import re
import textwrap
import tempfile
import xmlrpclib
#from docutils import core
from docutils.frontend import OptionParser
from docutils.io import StringOutput
import docutils.nodes
from docutils.nodes import GenericNodeVisitor
from docutils.parsers import rst
from docutils.writers import Writer
from sphinx.builders import Builder
from sphinx.util.console import bold, darkgreen
from sphinx.util.console import purple, red, darkgreen, darkgray
from sphinx.util.nodes import inline_all_toctrees
import enchant
from enchant.tokenize import (get_tokenizer, tokenize,
                              Filter, EmailFilter, WikiWordFilter,
                              unit_tokenize, wrap_tokenizer,
                              )

# TODO - Words with multiple uppercase letters treated as classes and ignored
class SpellingDirective(rst.Directive):
    """Custom directive for passing instructions to the spelling checker.

    .. spelling::

       word1
       word2
    """

    option_spec = {}
    has_content = True

    def run(self):
        env = self.state.document.settings.env

        # Initialize the per-document filters
        if not hasattr(env, 'spelling_document_filters'):
            env.spelling_document_filters = collections.defaultdict(list)

        good_words = []
        for entry in self.content:
            if not entry:
                continue
            good_words.extend(entry.split())
        if good_words:
            env.app.info('Extending local dictionary for %s with %s' % (
                env.docname, str(good_words)))
            env.spelling_document_filters[env.docname].append(
                IgnoreWordsFilterFactory(good_words)
            )
        return []

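# Usage sketch: with the extension enabled, a document can whitelist words
# from its own reST source (the example words below are arbitrary):
#
#   .. spelling::
#
#      Hellmann
#      pyenchant
#
# Each listed word is wrapped in an IgnoreWordsFilterFactory for that docname,
# so it is only ignored while that document is being checked.
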
class AcronymFilter(Filter):
    """If a word looks like an acronym (all upper case letters),
    ignore it.
    """
    def _skip(self, word):
        return (word == word.upper()  # all caps
                or
                # pluralized acronym ("URLs")
                (word[-1].lower() == 's'
                 and
                 word[:-1] == word[:-1].upper()
                 )
                )

class list_tokenize(tokenize):

    def __init__(self, words):
        tokenize.__init__(self, '')
        self._words = words

    def next(self):
        if not self._words:
            raise StopIteration()
        word = self._words.pop(0)
        return (word, 0)

class ContractionFilter(Filter):
    """Strip common contractions from words.
    """
    splits = {
        "won't": ['will', 'not'],
        "isn't": ['is', 'not'],
        "can't": ['can', 'not'],
        "i'm": ['I', 'am'],
    }

    def _split(self, word):
        # Fixed responses
        if word.lower() in self.splits:
            return list_tokenize(self.splits[word.lower()])

        # Possessive
        if word.lower().endswith("'s"):
            return unit_tokenize(word[:-2])

        # * not
        if word.lower().endswith("n't"):
            return unit_tokenize(word[:-3])

        return unit_tokenize(word)

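# Illustrative sketch: the helper below is hypothetical (nothing in the
# extension calls it) and assumes pyenchant's basic_tokenize is importable.
# It shows how a Filter subclass wraps a tokenizer callable and is then
# applied to text.
def _example_contraction_split(text=u"it isn't wrong"):
    from enchant.tokenize import basic_tokenize
    tokenizer = ContractionFilter(basic_tokenize)
    # "isn't" should come back as the separate tokens "is" and "not".
    return [word for word, pos in tokenizer(text)]
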
class IgnoreWordsFilter(Filter):
    """Given a set of words, ignore them all.
    """
    def __init__(self, tokenizer, word_set):
        self.word_set = set(word_set)
        Filter.__init__(self, tokenizer)

    def _skip(self, word):
        return word in self.word_set


class IgnoreWordsFilterFactory(object):

    def __init__(self, words):
        self.words = words

    def __call__(self, tokenizer):
        return IgnoreWordsFilter(tokenizer, self.words)

class PyPIFilterFactory(IgnoreWordsFilterFactory):
    """Build an IgnoreWordsFilter for all of the names of packages on PyPI.
    """
    def __init__(self):
        client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
        IgnoreWordsFilterFactory.__init__(self, client.list_packages())

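# Illustrative sketch: a project-specific factory can be built the same way
# and passed in through the spelling_filters configuration value. The names
# below are arbitrary examples.
def _example_project_names_factory():
    return IgnoreWordsFilterFactory(['sphinxcontrib', 'docutils', 'pyenchant'])
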
class PythonBuiltinsFilter(Filter):
    """Ignore names of built-in Python symbols.
    """
    def _skip(self, word):
        return hasattr(__builtin__, word)

class ImportableModuleFilter(Filter):
    """Ignore names of modules that we could import.
    """
    def __init__(self, tokenizer):
        Filter.__init__(self, tokenizer)
        self.found_modules = set()
        self.sought_modules = set()

    def _skip(self, word):
        if word not in self.sought_modules:
            self.sought_modules.add(word)
            try:
                imp.find_module(word)
            except UnicodeEncodeError:
                return False
            except ImportError:
                return False
            else:
                self.found_modules.add(word)
                return True
        return word in self.found_modules

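# Illustrative sketch (hypothetical helper, not used by the extension):
# wrapping a plain tokenizer with ImportableModuleFilter drops words that
# name importable modules, so 'os' is skipped while a made-up name is kept
# (unless a module by that name happens to be importable here).
def _example_importable_module_filter():
    from enchant.tokenize import basic_tokenize
    tokenizer = ImportableModuleFilter(basic_tokenize)
    return [word for word, pos in tokenizer(u"os osmodule")]
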
class SpellingChecker(object):
    """Checks the spelling of blocks of text.

    Uses options defined in the sphinx configuration file to control
    the checking and filtering behavior.
    """
    def __init__(self, lang, suggest, word_list_filename, filters=[]):
        self.dictionary = enchant.DictWithPWL(lang, word_list_filename)
        self.tokenizer = get_tokenizer(lang, filters)
        self.original_tokenizer = self.tokenizer
        self.suggest = suggest

    def push_filters(self, new_filters):
        """Add a filter to the tokenizer chain.
        """
        t = self.tokenizer
        for f in new_filters:
            t = f(t)
        self.tokenizer = t

    def pop_filters(self):
        """Remove the filters pushed during the last call to push_filters().
        """
        self.tokenizer = self.original_tokenizer

    def check(self, text):
        """Generator function that yields bad words and suggested alternate spellings.
        """
        for word, pos in self.tokenizer(text):
            correct = self.dictionary.check(word)
            if correct:
                continue
            yield word, self.dictionary.suggest(word) if self.suggest else []
        return

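# Usage sketch (not invoked by the builder): the checker can be driven outside
# of Sphinx. The language, word-list path, and sample text are assumptions for
# this example only; DictWithPWL expects the word-list file to exist.
def _example_standalone_check():
    checker = SpellingChecker(lang='en_US',
                              suggest=True,
                              word_list_filename='spelling_wordlist.txt',
                              filters=[ContractionFilter, EmailFilter])
    for word, suggestions in checker.check(u"This sentense has a mistake."):
        print word, suggestions
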
TEXT_NODES = set([ 'block_quote',
                   'paragraph',
                   'list_item',
                   'term',
                   'definition_list_item',
                   'title',
                   ])

class SpellingBuilder(Builder):
    """
    Spell checks a document
    """
    name = 'spelling'

    def init(self):
        self.docnames = []
        self.document_data = []

        # Initialize the per-document filters
        if not hasattr(self.env, 'spelling_document_filters'):
            self.env.spelling_document_filters = collections.defaultdict(list)

        # Initialize the global filters
        filters = [ ContractionFilter,
                    EmailFilter,
                    ]
        if self.config.spelling_ignore_wiki_words:
            filters.append(WikiWordFilter)
        if self.config.spelling_ignore_acronyms:
            filters.append(AcronymFilter)
        if self.config.spelling_ignore_pypi_package_names:
            self.info('Adding package names from PyPI to local spelling dictionary...')
            filters.append(PyPIFilterFactory())
        if self.config.spelling_ignore_python_builtins:
            filters.append(PythonBuiltinsFilter)
        if self.config.spelling_ignore_importable_modules:
            filters.append(ImportableModuleFilter)
        filters.extend(self.config.spelling_filters)

        project_words = os.path.join(self.srcdir, self.config.spelling_word_list_filename)
        self.checker = SpellingChecker(lang=self.config.spelling_lang,
                                       suggest=self.config.spelling_show_suggestions,
                                       word_list_filename=project_words,
                                       filters=filters,
                                       )
        self.output_filename = os.path.join(self.outdir, 'output.txt')
        self.output = codecs.open(self.output_filename, 'wt', encoding='UTF-8')

    def get_outdated_docs(self):
        return 'all documents'

    def prepare_writing(self, docnames):
        return

    def get_target_uri(self, docname, typ=None):
        return ''

    def format_suggestions(self, suggestions):
        if not self.config.spelling_show_suggestions or not suggestions:
            return u''
        return u'[' + u', '.join(u'"%s"' % s for s in suggestions) + u']'

    def write_doc(self, docname, doctree):
        self.checker.push_filters(self.env.spelling_document_filters[docname])

        for node in doctree.traverse(docutils.nodes.Text):
            if node.tagname == '#text' and node.parent and node.parent.tagname in TEXT_NODES:

                # Figure out the line number for this node by climbing the
                # tree until we find a node that has a line number.
                lineno = None
                parent = node
                seen = set()
                while lineno is None:
                    #self.info('looking for line number on %r' % node)
                    seen.add(parent)
                    parent = parent.parent
                    if parent is None or parent in seen:
                        break
                    lineno = parent.line
                filename = self.env.doc2path(docname, base=None)

                # Check the text of the node.
                for word, suggestions in self.checker.check(node.astext()):
                    msg_parts = [ docname ]
                    if lineno:
                        msg_parts.append(darkgreen('(line %3d)' % lineno))
                    msg_parts.append(red(word))
                    msg_parts.append(self.format_suggestions(suggestions))
                    msg = ' '.join(msg_parts)
                    self.info(msg)
                    self.output.write(u"%s:%s: (%s) %s\n" % (
                            self.env.doc2path(docname, None),
                            lineno, word,
                            self.format_suggestions(suggestions),
                            ))

                    # We found at least one bad spelling, so set the status
                    # code for the app to a value that indicates an error.
                    self.app.statuscode = 1

        self.checker.pop_filters()
        return

    def finish(self):
        self.output.close()
        self.info('Spelling checker messages written to %s' % self.output_filename)
        return

def setup(app):
    app.info('Initializing Spelling Checker')
    app.add_builder(SpellingBuilder)
    # Register the 'spelling' directive for setting parameters within a document
    app.add_directive('spelling', SpellingDirective)
    # Report guesses about correct spelling
    app.add_config_value('spelling_show_suggestions', False, 'env')
    # Set the language for the text
    app.add_config_value('spelling_lang', 'en_US', 'env')
    # Set a user-provided list of words known to be spelled properly
    app.add_config_value('spelling_word_list_filename', 'spelling_wordlist.txt', 'env')
    # Assume anything that looks like a PyPI package name is spelled properly
    app.add_config_value('spelling_ignore_pypi_package_names', False, 'env')
    # Assume words that look like wiki page names are spelled properly
    app.add_config_value('spelling_ignore_wiki_words', True, 'env')
    # Assume words that are all caps, or all caps with trailing s, are spelled properly
    app.add_config_value('spelling_ignore_acronyms', True, 'env')
    # Assume words that are part of __builtins__ are spelled properly
    app.add_config_value('spelling_ignore_python_builtins', True, 'env')
    # Assume words that look like the names of importable modules are spelled properly
    app.add_config_value('spelling_ignore_importable_modules', True, 'env')
    # Add any user-defined filter classes
    app.add_config_value('spelling_filters', [], 'env')
    return

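# Configuration sketch (illustrative values): a project's conf.py enables the
# extension and may override the config values registered above, e.g.:
#
#   extensions = ['sphinxcontrib.spelling']
#   spelling_lang = 'en_US'
#   spelling_word_list_filename = 'spelling_wordlist.txt'
#   spelling_show_suggestions = True
#
# The builder is then selected with "sphinx-build -b spelling <srcdir> <outdir>"
# and writes its report to output.txt in the output directory.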
|