# handler.py
#
# $Workfile: Handler.py $ $Revision: 14 $
# $Date: 10/07/01 1:43p $ $Author: Sholden $
#
import Cookie
import Session
import MapSite
import SiteSpec
import HomePage
import time
import traceback
import urllib
from Error import Error
from Params import STIMEOUT_INTERVAL, SCHECK_INTERVAL
import asynchat, cgi, rfc822, cStringIO, time, sys
lastflushtime = 0 # session flushing clock
class HTTP(asynchat.async_chat):
    """Asynchronous handler for a single HTTP connection.

    Collects the request headers (and, for POST, the request body), maps
    the request onto a page generator through MapSite/SiteSpec, and buffers
    the generated response for asynchronous transmission back to the
    client.  Session state is carried in a "session" cookie whose value is
    "<sessid>:<id(session object)>", keyed into the shared sessions dict.
    """

    def __init__(self, conn, addr, sessions, log):
        # conn, addr: the accepted socket and the client's address
        # sessions:   shared session dictionary; key "NEXT" holds the next
        #             session id to allocate
        # log:        writable file-like object receiving access-log lines
        asynchat.async_chat.__init__(self, conn=conn)
        self.addr = addr
        self.sessions = sessions
        self.ibuffer = []
        self.obuffer = ""
        self.set_terminator("\r\n\r\n")  # first read up to end of headers
        self.reading_headers = 1
        self.handling = 0
        self.cgi_data = None
        self.log = log

    def collect_incoming_data(self, data):
        """Buffer the data"""
        self.ibuffer.append(data)

    def found_terminator(self):
        """Called at the end of the headers, then (POST only) at end of body."""
        if self.reading_headers:
            self.reading_headers = 0
            self.parse_headers()
            if self.op.upper() == "POST":
                # Switch the terminator to a byte count covering the body.
                clen = self.headers.getheader("content-length")
                self.set_terminator(int(clen))
            else:
                self.handle_request()
        elif not self.handling:
            self.set_terminator(None)  # browsers sometimes over-send
            self.cgi_data = parse(self.headers, "".join(self.ibuffer))
            self.handling = 1
            self.ibuffer = []
            self.handle_request()

    def parse_headers(self):
        """Parse headers as rfc822, authenticate as necessary and generate content.

        Note that the standard input remains unread, to allow appropriate
        handling (such as CGI dictionary creation) from the page code.
        Establishes self.op, self.path, self.origpath, self.query,
        self.session and self.sessid for use by handle_request().
        """
        data = "".join(self.ibuffer)
        self.ibuffer = []  # to collect input if any arrives
        linebreak = data.find("\n")
        self.request = data[:linebreak]
        self.headers = rfc822.Message(cStringIO.StringIO(data[linebreak+1:]))
        cookies = Cookie.SimpleCookie()
        chdr = self.headers.getheader("cookie")
        if chdr:
            cookies.load(chdr)
        try:
            now = time.time()
            # Cookie value is "<sessid>:<id(session)>"; split() raises
            # ValueError for an incorrect cookie format.
            self.sessid, sessid = cookies["session"].value.split(":")
            self.session = self.sessions[self.sessid]
            if id(self.session) != int(sessid):  # not my session
                raise KeyError
            elif now - self.session._atime > STIMEOUT_INTERVAL:
                del self.sessions[self.sessid]  # timed out
                raise KeyError
            self.session._atime = now
        except (KeyError, ValueError):
            # New session required because invalid/no cookie or timed out
            self.sessions["NEXT"] += 1
            self.sessid = str(self.sessions["NEXT"])
            self.session = self.sessions[self.sessid] = \
                Session.Session(self.sessid)
        self.starttime = time.time()
        self.op, path, protocol = self.request.split()
        if len(path) > 1:
            path = path[1:]  # removes leading "/" except for root
        self.ibuffer = []
        if "?" not in path:
            query = ""
        else:
            path, query = path.split("?", 1)
        self.path = path.split("/")
        self.origpath = self.path[:]
        self.query = cgi.parse_qs(query, 1)
        for k, v in self.query.items():
            self.query[k] = v[0]  # collapse single-valued lists to scalars

    def handle_request(self):
        """Resolve the request to a page generator, build and log the response.

        Any exception escaping page generation is caught, logged to stderr,
        and turned into a "999" diagnostic page so the server stays up.
        """
        try:
            try:
                gen = MapSite.PageSpec(self.op, self.path, self.query,
                                       self.session, self.headers,
                                       SiteSpec.PageMap)
            except KeyError:
                gen = Error404Page("GET", [], {}, self.session, self.headers,
                                   Realm=None)
            except ValueError:  # terminal path with no trailing "/"
                URI = "/".join([''] + self.origpath + [''])
                if self.query:
                    # BUGFIX: was urllib.urlencode(query) -- 'query' is
                    # undefined here and raised NameError.
                    URI += "?" + urllib.urlencode(self.query)
                gen = Error301Page(self.op, self.origpath, self.query,
                                   self.session, self.headers, Realm=None,
                                   oHeaders=["Location: %s" % URI])
            # NOTE(review): duration is measured before Generate() runs, so
            # it excludes generation time -- kept as-is to preserve existing
            # log semantics.
            duration = time.time() - self.starttime
            self.obuffer = gen.Generate(Input=self.cgi_data)
            logentry = (
                time.strftime("%d-%m-%Y %H:%M:%S",
                              time.localtime(self.starttime)),
                self.addr[0], self.request, gen.ReturnCode(), duration)
            self.log.write("%s %s %s %d %.3f\n" % logentry)
            self.log.flush()  # no logging without flush()
            # Periodically sweep timed-out sessions from the shared dict.
            global lastflushtime
            now = time.time()
            if now - lastflushtime > SCHECK_INTERVAL:
                lastflushtime = now
                for k in self.sessions.keys():
                    if k != "NEXT" and \
                       (now - self.sessions[k]._atime) > STIMEOUT_INTERVAL:
                        del self.sessions[k]
        except:  # we don't usually, but this is a server framework!
            sys.stderr.write('[%s] [error] Handler: Error while responding to '
                             'request (%s)\n' % (time.asctime(time.localtime(time.time())), self.request))
            sys.stderr.write('Python exception:\n')
            traceback.print_exc(file=sys.stderr)
            sys.stderr.flush()
            # apply() replaced with direct argument unpacking (equivalent).
            output = ''.join(traceback.format_exception(*sys.exc_info()))
            output = HTMLEncode(output)
            # BUGFIX: a blank line is required between the HTTP headers and
            # the body; without it the response was malformed.
            self.obuffer = '''HTTP/1.0 999 Fatal Handler Error
Content-type: text/html

<html><body>
<p><pre>ERROR: PLEASE ACCEPT OUR APOLOGIES FOR
PRESENTING THIS TECHNICAL DATA, WHICH HAS
BEEN LOGGED AND WILL RECEIVE ATTENTION
%s</pre>
</body></html>\n''' % output
            sys.stdout.flush()

    def writable(self):
        """asyncore hook: ready to write while buffered output remains."""
        return len(self.obuffer) > 0

    def handle_write(self):
        """Send as much buffered output as the socket accepts; close when drained."""
        sent = self.send(self.obuffer)
        self.obuffer = self.obuffer[sent:]
        if len(self.obuffer) == 0:
            self.close()
class Error404Page(HomePage.Home):
    """Page generator that unconditionally refuses the request with a 404."""

    def Authenticate(self):
        """Abort page generation by raising a 404 Not Found Error."""
        raise Error(404, "Not Found", "", [], "Unable to map resource")
class Error301Page(HomePage.Home):
    """Page generator that issues a 301 permanent redirect.

    The redirect target is taken from a "Location:" entry in self.oheaders;
    when none is present the body falls back to a link to the site root.
    """

    def Authenticate(self):
        """Raise Error(301) whose body links to the redirect destination."""
        dest, link = "unknown location: click for home page", "/"
        for h in self.oheaders:
            s = h.split()
            if s[0] == "Location:":
                dest = link = s[1]
                break
        # BUGFIX: format arguments were swapped -- in the fallback case the
        # HREF received the human-readable text and the anchor text "/".
        # Also removed a stray ")" from the anchor text.
        raise Error(301, "Permanently Moved", "", self.oheaders,
                    '<P>Moved to <A HREF="%s">%s</A></P>' % (link, dest))
def parse(headers, data, keep_blank_values=1, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Stolen from core library cgi module and heavily modified
Arguments, all optional:
headers : rfc822.Message containing headers
data : the CGI data block
keep_blank_values: flag indicating whether blank values in
URL encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
import socket
maxlen = 0
ctype, pdict = cgi.parse_header(headers.getheader("content-type"))
if ctype == 'multipart/form-data':
data = cgi.parse_multipart(fp, pdict)
for k, v in data.items():
if type(v) == type([]) and len(v) == 1:
data[k] = v[0]
return data
elif ctype == 'application/x-www-form-urlencoded':
clength = int(headers.getheader("content-length"))
if maxlen and clength > maxlen:
raise ValueError, 'Maximum content length exceeded'
else:
raise ValueError, "Unknown content type "+ctype
data = cgi.parse_qs(data, keep_blank_values, strict_parsing)
for k, v in data.items():
if type(v) == type([]) and len(v) == 1:
data[k] = v[0]
return data
# Translation table used by HTMLEncode: (plain character, HTML entity).
# BUGFIX: the table had degenerated into identity mappings (['&', '&'],
# ['<', '<'], ...) so HTMLEncode was a no-op and the fatal-error page
# emitted raw, unescaped markup.  The ampersand MUST come first so that
# entities substituted later are not themselves re-escaped.
HTMLCodes = [
    ['&', '&amp;'],
    ['<', '&lt;'],
    ['>', '&gt;'],
    ['"', '&quot;'],
]

def HTMLEncode(s, codes=HTMLCodes):
    """Return the HTML-encoded version of the given string.

    This is useful to display a plain ASCII text string on a web page.
    (We could get this from WebUtils, but we're keeping this module
    independent of everything but standard Python.)
    """
    for plain, entity in codes:
        s = s.replace(plain, entity)
    return s
# (removed: code-viewer UI residue accidentally captured with the source)