/usr/lib/python2.7/dist-packages/webob/compat.py is in python-webob 1.3.1-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
# code stolen from "six"
import sys
import types
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    long = int
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    long = long

# TODO check if errors is ever used
def text_(s, encoding='latin-1', errors='strict'):
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    return s

def bytes_(s, encoding='latin-1', errors='strict'):
    if isinstance(s, text_type):
        return s.encode(encoding, errors)
    return s
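# text_() and bytes_() are inverse coercion helpers; for example,
# text_(b'caf\xc3\xa9', 'utf-8') == u'caf\xe9' and
# bytes_(u'caf\xe9', 'utf-8') == b'caf\xc3\xa9' on either Python version.
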
if PY3: # pragma: no cover
    def native_(s, encoding='latin-1', errors='strict'):
        if isinstance(s, text_type):
            return s
        return str(s, encoding, errors)
else:
    def native_(s, encoding='latin-1', errors='strict'):
        if isinstance(s, text_type):
            return s.encode(encoding, errors)
        return str(s)
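# native_() always returns the platform's native str type: on Python 3 text is
# returned unchanged and bytes are decoded, while on Python 2 unicode is
# encoded to str. For example, native_(b'abc') == 'abc' on Python 3 and
# native_(u'abc') == 'abc' on Python 2.
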
try:
    from queue import Queue, Empty
except ImportError:
    from Queue import Queue, Empty

if PY3: # pragma: no cover
    from urllib import parse
    urlparse = parse
    from urllib.parse import quote as url_quote
    from urllib.parse import urlencode as url_encode, quote_plus
    from urllib.request import urlopen as url_open
else:
    import urlparse
    from urllib import quote_plus
    from urllib import quote as url_quote
    from urllib import unquote as url_unquote
    from urllib import urlencode as url_encode
    from urllib2 import urlopen as url_open

if PY3: # pragma: no cover
    def reraise(exc_info):
        etype, exc, tb = exc_info
        if exc.__traceback__ is not tb:
            raise exc.with_traceback(tb)
        raise exc
else: # pragma: no cover
    exec("def reraise(exc): raise exc[0], exc[1], exc[2]")
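# reraise() takes the (type, value, traceback) triple returned by
# sys.exc_info(), so a typical call site is
#     except Exception:
#         reraise(sys.exc_info())
# which re-raises the active exception with its original traceback on both
# Python 2 and Python 3.
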
if PY3: # pragma: no cover
    def iteritems_(d):
        return d.items()
    def itervalues_(d):
        return d.values()
else:
    def iteritems_(d):
        return d.iteritems()
    def itervalues_(d):
        return d.itervalues()

if PY3: # pragma: no cover
    def unquote(string):
        if not string:
            return b''
        res = string.split(b'%')
        if len(res) != 1:
            string = res[0]
            for item in res[1:]:
                try:
                    string += bytes([int(item[:2], 16)]) + item[2:]
                except ValueError:
                    string += b'%' + item
        return string

    def url_unquote(s):
        return unquote(s.encode('ascii')).decode('latin-1')

    def parse_qsl_text(qs, encoding='utf-8'):
        qs = qs.encode('latin-1')
        qs = qs.replace(b'+', b' ')
        pairs = [s2 for s1 in qs.split(b'&') for s2 in s1.split(b';') if s2]
        for name_value in pairs:
            nv = name_value.split(b'=', 1)
            if len(nv) != 2:
                nv.append('')
            name = unquote(nv[0])
            value = unquote(nv[1])
            yield (name.decode(encoding), value.decode(encoding))

else:
    from urlparse import parse_qsl

    def parse_qsl_text(qs, encoding='utf-8'):
        qsl = parse_qsl(
            qs,
            keep_blank_values=True,
            strict_parsing=False
        )
        for (x, y) in qsl:
            yield (x.decode(encoding), y.decode(encoding))
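# parse_qsl_text() is a generator yielding decoded (name, value) text pairs;
# for example, list(parse_qsl_text('a=1&b=%C3%A9')) produces
# [(u'a', u'1'), (u'b', u'\xe9')] with the default UTF-8 encoding.
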
if PY3: # pragma: no cover
    from html import escape
else:
    from cgi import escape
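For orientation, here is a minimal sketch of how these helpers might be used from application code. The variable names and sample values below are illustrative and are not taken from the file above; only the imported names come from webob.compat.

    from webob.compat import bytes_, native_, parse_qsl_text, text_

    # Coerce values to the type the surrounding code expects, regardless of
    # whether it runs on Python 2 or Python 3.
    body = bytes_(u'name=caf\xe9', 'utf-8')    # always bytes
    header = native_('text/plain')             # always the native str type
    title = text_(b'caf\xc3\xa9', 'utf-8')     # always text (unicode)

    # Decode a query string into text (name, value) pairs.
    params = dict(parse_qsl_text('a=1&b=%C3%A9'))   # {u'a': u'1', u'b': u'\xe9'}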