📄 cgi.py
#! /usr/local/bin/python

# NOTE: the above "/usr/local/bin/python" is NOT a mistake.  It is
# intentionally NOT "/usr/bin/env python".  On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python.  Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin.  So let those vendors patch cgi.py to match their choice
# of installation.

"""Support module for CGI (Common Gateway Interface) scripts.

This module defines a number of utilities for use by CGI scripts
written in Python.
"""

# XXX Perhaps there should be a slimmed version that doesn't contain
# all those backwards compatible and debugging classes and functions?

# History
# -------
#
# Michael McLay started this module.  Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict.  The multipart
# parsing was inspired by code submitted by Andreas Paepcke.  Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#

__version__ = "2.6"


# Imports
# =======

import sys
import os
import urllib
import mimetools
import rfc822
import UserDict
from StringIO import StringIO

__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
           "SvFormContentDict", "InterpFormContentDict", "FormContent",
           "parse", "parse_qs", "parse_qsl", "parse_multipart",
           "parse_header", "print_exception", "print_environ",
           "print_form", "print_directory", "print_arguments",
           "print_environ_usage", "escape"]


# Logging support
# ===============

logfile = ""            # Filename to log to, if not empty
logfp = None            # File object to log to, if not None

def initlog(*allargs):
    """Write a log message, if there is a log file.

    Even though this function is called initlog(), you should always
    use log(); log is a variable that is set either to initlog
    (initially), to dolog (once the log file has been opened), or to
    nolog (when logging is disabled).

    The first argument is a format string; the remaining arguments (if
    any) are arguments to the % operator, so e.g.
        log("%s: %s", "a", "b")
    will write "a: b" to the log file, followed by a newline.

    If the global logfp is not None, it should be a file object to
    which log data is written.

    If the global logfp is None, the global logfile may be a string
    giving a filename to open, in append mode.  This file should be
    world writable!!!  If the file can't be opened, logging is
    silently disabled (since there is no safe place where we could
    send an error message).

    """
    global logfp, log
    if logfile and not logfp:
        try:
            logfp = open(logfile, "a")
        except IOError:
            pass
    if not logfp:
        log = nolog
    else:
        log = dolog
    apply(log, allargs)

def dolog(fmt, *args):
    """Write a log message to the log file.  See initlog() for docs."""
    logfp.write(fmt % args + "\n")

def nolog(*allargs):
    """Dummy function, assigned to log when logging is disabled."""
    pass

log = initlog           # The current logging function


# Parsing functions
# =================

# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0

def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
    """Parse a query in the environment or from a file (default stdin)

        Arguments, all optional:

        fp              : file pointer; default: sys.stdin

        environ         : environment dictionary; default: os.environ

        keep_blank_values: flag indicating whether blank values in
            URL encoded forms should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.
    """
    if not fp:
        fp = sys.stdin
    if not environ.has_key('REQUEST_METHOD'):
        environ['REQUEST_METHOD'] = 'GET'       # For testing stand-alone
    if environ['REQUEST_METHOD'] == 'POST':
        ctype, pdict = parse_header(environ['CONTENT_TYPE'])
        if ctype == 'multipart/form-data':
            return parse_multipart(fp, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            clength = int(environ['CONTENT_LENGTH'])
            if maxlen and clength > maxlen:
                raise ValueError, 'Maximum content length exceeded'
            qs = fp.read(clength)
        else:
            qs = ''                     # Unknown content-type
        if environ.has_key('QUERY_STRING'):
            if qs: qs = qs + '&'
            qs = qs + environ['QUERY_STRING']
        elif sys.argv[1:]:
            if qs: qs = qs + '&'
            qs = qs + sys.argv[1]
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    elif environ.has_key('QUERY_STRING'):
        qs = environ['QUERY_STRING']
    else:
        if sys.argv[1:]:
            qs = sys.argv[1]
        else:
            qs = ""
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    return parse_qs(qs, keep_blank_values, strict_parsing)


def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

        Arguments:

        qs: URL-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            URL encoded queries should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.
    """
    dict = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        if dict.has_key(name):
            dict[name].append(value)
        else:
            dict[name] = [value]
    return dict

def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: URL-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        URL encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.

    Returns a list, as G-d intended.
    """
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError, "bad query field: %s" % `name_value`
            continue
        if len(nv[1]) or keep_blank_values:
            name = urllib.unquote(nv[0].replace('+', ' '))
            value = urllib.unquote(nv[1].replace('+', ' '))
            r.append((name, value))

    return r

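# Illustrative sketch (not part of the original cgi.py source): the
# query-parsing helpers above are plain functions, so their behaviour is
# easy to check from an interactive Python 2 session.  The query string
# below is made up for the example:
#
#     >>> from cgi import parse_qs, parse_qsl
#     >>> parse_qsl("a=1&a=2&b=spam+eggs&empty=")
#     [('a', '1'), ('a', '2'), ('b', 'spam eggs')]
#     >>> parse_qsl("a=1&a=2&b=spam+eggs&empty=", keep_blank_values=1)
#     [('a', '1'), ('a', '2'), ('b', 'spam eggs'), ('empty', '')]
#
# parse_qs() groups the same pairs into a dictionary of lists, so the
# first call above would yield {'a': ['1', '2'], 'b': ['spam eggs']}
# (key order in the dictionary is arbitrary).
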
def parse_multipart(fp, pdict):
    """Parse multipart input.

    Arguments:
    fp   : input file
    pdict: dictionary containing other parameters of content-type header

    Returns a dictionary just like parse_qs(): keys are the field names, each
    value is a list of values for that field.  This is easy to use but not
    much good if you are expecting megabytes to be uploaded -- in that case,
    use the FieldStorage class instead which is much more flexible.  Note
    that content-type is the raw, unparsed contents of the content-type
    header.

    XXX This does not parse nested multipart parts -- use FieldStorage for
    that.

    XXX This should really be subsumed by FieldStorage altogether -- no
    point in having two implementations of the same parsing algorithm.

    """
    boundary = ""
    if pdict.has_key('boundary'):
        boundary = pdict['boundary']
    if not valid_boundary(boundary):
        raise ValueError, ('Invalid boundary in multipart form: %s'
                           % `boundary`)

    nextpart = "--" + boundary
    lastpart = "--" + boundary + "--"
    partdict = {}
    terminator = ""

    while terminator != lastpart:
        bytes = -1
        data = None
        if terminator:
            # At start of next part.  Read headers first.
            headers = mimetools.Message(fp)
            clength = headers.getheader('content-length')
            if clength:
                try:
                    bytes = int(clength)
                except ValueError:
                    pass
            if bytes > 0:
                if maxlen and bytes > maxlen:
                    raise ValueError, 'Maximum content length exceeded'
                data = fp.read(bytes)
            else:
                data = ""
        # Read lines until end of part.
        lines = []
        while 1:
            line = fp.readline()
            if not line:
                terminator = lastpart   # End outer loop
                break
            if line[:2] == "--":
                terminator = line.strip()
                if terminator in (nextpart, lastpart):
                    break
            lines.append(line)
        # Done with part.
        if data is None:
            continue
        if bytes < 0:
            if lines:
                # Strip final line terminator
                line = lines[-1]
                if line[-2:] == "\r\n":
                    line = line[:-2]
                elif line[-1:] == "\n":
                    line = line[:-1]
                lines[-1] = line
                data = "".join(lines)
        line = headers['content-disposition']
        if not line:
            continue
        key, params = parse_header(line)
        if key != 'form-data':
            continue
        if params.has_key('name'):
            name = params['name']
        else:
            continue
        if partdict.has_key(name):
            partdict[name].append(data)
        else:
            partdict[name] = [data]

    return partdict


def parse_header(line):
    """Parse a Content-type like header.

    Return the main content-type and a dictionary of options.

    """
    plist = map(lambda x: x.strip(), line.split(';'))
    key = plist[0].lower()
    del plist[0]
    pdict = {}
    for p in plist:
        i = p.find('=')
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i+1:].strip()
            if len(value) >= 2 and value[0] == value[-1] == '"':
                value = value[1:-1]
            pdict[name] = value
    return key, pdict

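# Illustrative sketch (not part of the original cgi.py source): the
# header values below are invented for the example, but show what
# parse_header() returns for typical Content-Type lines:
#
#     >>> from cgi import parse_header
#     >>> parse_header('text/html; charset="utf-8"')
#     ('text/html', {'charset': 'utf-8'})
#     >>> parse_header('multipart/form-data; boundary=AaB03x')
#     ('multipart/form-data', {'boundary': 'AaB03x'})
#
# The main value and the parameter names are lowercased, and quoted
# parameter values have their surrounding quotes stripped.
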
# Classes for field storage
# =========================

class MiniFieldStorage:

    """Like FieldStorage, for use when no file uploads are possible."""

    # Dummy attributes
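# Illustrative sketch (not part of the original cgi.py source): a
# minimal Python 2 CGI script using this module via the FieldStorage
# class recommended above.  The form field name "user" is an assumption
# made up for the example:
#
#     #!/usr/local/bin/python
#     import cgi
#
#     form = cgi.FieldStorage()        # parses QUERY_STRING or the POST body
#     print "Content-Type: text/html"  # HTTP header
#     print                            # blank line ends the headers
#     if form.has_key("user"):
#         # escape() guards against HTML injection in the echoed value
#         print "<p>Hello, %s.</p>" % cgi.escape(form["user"].value)
#     else:
#         print "<p>No 'user' field was submitted.</p>"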