/usr/share/pyshared/medusa/default_handler.py is in python-medusa 1:0.5.4-7build1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
# -*- Mode: Python -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1997 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: default_handler.py,v 1.8 2002/08/01 18:15:45 akuchling Exp $'
# standard python modules
import mimetypes
import re
import stat
import string
# medusa modules
import http_date
import http_server
import status_handler
import producers
unquote = http_server.unquote
# This is the 'default' handler. it implements the base set of
# features expected of a simple file-delivering HTTP server. file
# services are provided through a 'filesystem' object, the very same
# one used by the FTP server.
#
# You can replace or modify this handler if you want a non-standard
# HTTP server. You can also derive your own handler classes from
# it.
#
# support for handling POST requests is available in the derived
# class <default_with_post_handler>, defined below.
#
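# Editor's sketch (not part of the original file): a minimal example of how
# this handler is commonly wired into a medusa server. The filesys module,
# the os_filesystem class, the document root and the port below are
# assumptions for illustration only:
#
#   import asyncore
#   import filesys
#   import http_server
#   import default_handler
#
#   fs = filesys.os_filesystem ('/var/www')      # filesystem object, as for the FTP server
#   hs = http_server.http_server ('', 8080)      # listen on all interfaces, port 8080
#   hs.install_handler (default_handler.default_handler (fs))
#   asyncore.loop()
#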
from counter import counter
class default_handler:
    valid_commands = ['GET', 'HEAD']

    IDENT = 'Default HTTP Request Handler'

    # Pathnames that are tried when a URI resolves to a directory name
    directory_defaults = [
        'index.html',
        'default.html'
        ]

    default_file_producer = producers.file_producer

    def __init__ (self, filesystem):
        self.filesystem = filesystem
        # count total hits
        self.hit_counter = counter()
        # count file deliveries
        self.file_counter = counter()
        # count cache hits
        self.cache_counter = counter()

    hit_counter = 0

    def __repr__ (self):
        return '<%s (%s hits) at %x>' % (
            self.IDENT,
            self.hit_counter,
            id (self)
            )
    # always match, since this is a default
    def match (self, request):
        return 1

    # handle a file request, with caching.
    def handle_request (self, request):

        if request.command not in self.valid_commands:
            request.error (400) # bad request
            return

        self.hit_counter.increment()

        path, params, query, fragment = request.split_uri()

        if '%' in path:
            path = unquote (path)

        # strip off all leading slashes
        while path and path[0] == '/':
            path = path[1:]

        if self.filesystem.isdir (path):
            if path and path[-1] != '/':
                request['Location'] = 'http://%s/%s/' % (
                    request.channel.server.server_name,
                    path
                    )
                request.error (301)
                return

            # we could also generate a directory listing here,
            # may want to move this into another method for that
            # purpose
            found = 0
            if path and path[-1] != '/':
                path = path + '/'
            for default in self.directory_defaults:
                p = path + default
                if self.filesystem.isfile (p):
                    path = p
                    found = 1
                    break
            if not found:
                request.error (404) # Not Found
                return

        elif not self.filesystem.isfile (path):
            request.error (404) # Not Found
            return
        file_length = self.filesystem.stat (path)[stat.ST_SIZE]

        ims = get_header_match (IF_MODIFIED_SINCE, request.header)

        length_match = 1
        if ims:
            length = ims.group (4)
            if length:
                try:
                    length = string.atoi (length)
                    if length != file_length:
                        length_match = 0
                except:
                    pass

        ims_date = 0
        if ims:
            ims_date = http_date.parse_http_date (ims.group (1))

        try:
            mtime = self.filesystem.stat (path)[stat.ST_MTIME]
        except:
            request.error (404)
            return

        if length_match and ims_date:
            if mtime <= ims_date:
                request.reply_code = 304
                request.done()
                self.cache_counter.increment()
                return

        try:
            file = self.filesystem.open (path, 'rb')
        except IOError:
            request.error (404)
            return

        request['Last-Modified'] = http_date.build_http_date (mtime)
        request['Content-Length'] = file_length
        self.set_content_type (path, request)

        if request.command == 'GET':
            request.push (self.default_file_producer (file))

        self.file_counter.increment()
        request.done()
    def set_content_type (self, path, request):
        ext = string.lower (get_extension (path))
        typ, encoding = mimetypes.guess_type(path)
        if typ is not None:
            request['Content-Type'] = typ
        else:
            # TODO: test a chunk off the front of the file for 8-bit
            # characters, and use application/octet-stream instead.
            request['Content-Type'] = 'text/plain'
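    # Editor's note (illustrative, not part of the original file):
    # mimetypes.guess_type ('index.html') returns ('text/html', None); an
    # unrecognized extension yields (None, None), so the handler above
    # falls back to 'text/plain'.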
    def status (self):
        return producers.simple_producer (
            '<li>%s' % status_handler.html_repr (self)
            + '<ul>'
            + ' <li><b>Total Hits:</b> %s' % self.hit_counter
            + ' <li><b>Files Delivered:</b> %s' % self.file_counter
            + ' <li><b>Cache Hits:</b> %s' % self.cache_counter
            + '</ul>'
            )
# HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
# to this header. I suppose its purpose is to avoid the overhead
# of parsing dates...
IF_MODIFIED_SINCE = re.compile (
    'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
    re.IGNORECASE
    )
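# Editor's note (illustrative, not part of the original file): a header such
# as 'If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT; length=3128' matches
# with group(1) holding the date string and group(4) the length, which is
# exactly what handle_request() reads above.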
USER_AGENT = re.compile ('User-Agent: (.*)', re.IGNORECASE)
CONTENT_TYPE = re.compile (
    r'Content-Type: ([^;]+)((; boundary=([A-Za-z0-9\'\(\)+_,./:=?-]+)$)|$)',
    re.IGNORECASE
    )
get_header = http_server.get_header
get_header_match = http_server.get_header_match
def get_extension (path):
    dirsep = string.rfind (path, '/')
    dotsep = string.rfind (path, '.')
    if dotsep > dirsep:
        return path[dotsep+1:]
    else:
        return ''
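# Editor's note (illustrative, not part of the original file): for example,
# get_extension ('/docs/index.html') returns 'html', while a path whose only
# dot is in a directory name, e.g. '/docs.d/README', returns ''.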