ref: e6b4528626528c2bf11f760bfc660375195e6055
author: Ori Bernstein <ori@eigenstate.org>
date: Tue Jun 8 19:10:51 EDT 2021
python: archive it
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/BaseHTTPServer.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,578 @@
+"""HTTP server base class.
+
+Note: the class in this module doesn't implement any HTTP request; see
+SimpleHTTPServer for simple implementations of GET, HEAD and POST
+(including CGI scripts). It does, however, optionally implement HTTP/1.1
+persistent connections, as of version 0.3.
+
+Contents:
+
+- BaseHTTPRequestHandler: HTTP request handler base class
+- test: test function
+
+XXX To do:
+
+- log requests even later (to capture byte count)
+- log user-agent header and other interesting goodies
+- send error log to separate file
+"""
+
+
+# See also:
+#
+# HTTP Working Group T. Berners-Lee
+# INTERNET-DRAFT R. T. Fielding
+# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
+# Expires September 8, 1995 March 8, 1995
+#
+# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
+#
+# and
+#
+# Network Working Group R. Fielding
+# Request for Comments: 2616 et al
+# Obsoletes: 2068 June 1999
+# Category: Standards Track
+#
+# URL: http://www.faqs.org/rfcs/rfc2616.html
+
+# Log files
+# ---------
+#
+# Here's a quote from the NCSA httpd docs about log file format.
+#
+# | The logfile format is as follows. Each line consists of:
+# |
+# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
+# |
+# | host: Either the DNS name or the IP number of the remote client
+# | rfc931: Any information returned by identd for this person,
+# | - otherwise.
+# | authuser: If user sent a userid for authentication, the user name,
+# | - otherwise.
+# | DD: Day
+# | Mon: Month (calendar name)
+# | YYYY: Year
+# | hh: hour (24-hour format, the machine's timezone)
+# | mm: minutes
+# | ss: seconds
+# | request: The first line of the HTTP request as sent by the client.
+# | ddd: the status code returned by the server, - if not available.
+# | bbbb: the total number of bytes sent,
+# | *not including the HTTP/1.0 header*, - if not available
+# |
+# | You can determine the name of the file accessed through request.
+#
+# (Actually, the latter is only true if you know the server configuration
+# at the time the request was made!)
+
+__version__ = "0.3"
+
+__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
+
+import sys
+import time
+import socket # For gethostbyaddr()
+import mimetools
+import SocketServer
+
+# Default error message
+DEFAULT_ERROR_MESSAGE = """\
+<head>
+<title>Error response</title>
+</head>
+<body>
+<h1>Error response</h1>
+<p>Error code %(code)d.
+<p>Message: %(message)s.
+<p>Error code explanation: %(code)s = %(explain)s.
+</body>
+"""
+
+def _quote_html(html):
+    return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+
+class HTTPServer(SocketServer.TCPServer):
+
+ allow_reuse_address = 1 # Seems to make sense in testing environment
+
+ def server_bind(self):
+ """Override server_bind to store the server name."""
+ SocketServer.TCPServer.server_bind(self)
+ host, port = self.socket.getsockname()[:2]
+ self.server_name = socket.getfqdn(host)
+ self.server_port = port
+
+
+class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
+
+ """HTTP request handler base class.
+
+ The following explanation of HTTP serves to guide you through the
+ code as well as to expose any misunderstandings I may have about
+ HTTP (so you don't need to read the code to figure out I'm wrong
+ :-).
+
+ HTTP (HyperText Transfer Protocol) is an extensible protocol on
+ top of a reliable stream transport (e.g. TCP/IP). The protocol
+ recognizes three parts to a request:
+
+ 1. One line identifying the request type and path
+ 2. An optional set of RFC-822-style headers
+ 3. An optional data part
+
+ The headers and data are separated by a blank line.
+
+ The first line of the request has the form
+
+ <command> <path> <version>
+
+ where <command> is a (case-sensitive) keyword such as GET or POST,
+ <path> is a string containing path information for the request,
+ and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
+ <path> is encoded using the URL encoding scheme (using %xx to signify
+ the ASCII character with hex code xx).
+
+    The specification states that lines are separated by CRLF but,
+    for compatibility with the widest range of clients, recommends
+    that servers also handle LF. Similarly, whitespace in the request
+    line is treated sensibly (allowing multiple spaces between
+    components and allowing trailing whitespace).
+
+ Similarly, for output, lines ought to be separated by CRLF pairs
+ but most clients grok LF characters just fine.
+
+ If the first line of the request has the form
+
+ <command> <path>
+
+ (i.e. <version> is left out) then this is assumed to be an HTTP
+ 0.9 request; this form has no optional headers and data part and
+ the reply consists of just the data.
+
+ The reply form of the HTTP 1.x protocol again has three parts:
+
+ 1. One line giving the response code
+ 2. An optional set of RFC-822-style headers
+ 3. The data
+
+ Again, the headers and data are separated by a blank line.
+
+ The response code line has the form
+
+ <version> <responsecode> <responsestring>
+
+ where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
+ <responsecode> is a 3-digit response code indicating success or
+ failure of the request, and <responsestring> is an optional
+ human-readable string explaining what the response code means.
+
+ This server parses the request and the headers, and then calls a
+ function specific to the request type (<command>). Specifically,
+ a request SPAM will be handled by a method do_SPAM(). If no
+ such method exists the server sends an error response to the
+ client. If it exists, it is called with no arguments:
+
+ do_SPAM()
+
+ Note that the request name is case sensitive (i.e. SPAM and spam
+ are different requests).
+
+ The various request details are stored in instance variables:
+
+ - client_address is the client IP address in the form (host,
+ port);
+
+ - command, path and version are the broken-down request line;
+
+ - headers is an instance of mimetools.Message (or a derived
+ class) containing the header information;
+
+ - rfile is a file object open for reading positioned at the
+ start of the optional input data part;
+
+ - wfile is a file object open for writing.
+
+ IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+ The first thing to be written must be the response line. Then
+ follow 0 or more header lines, then a blank line, and then the
+ actual data (if any). The meaning of the header lines depends on
+ the command executed by the server; in most cases, when data is
+ returned, there should be at least one header line of the form
+
+ Content-type: <type>/<subtype>
+
+ where <type> and <subtype> should be registered MIME types,
+ e.g. "text/html" or "text/plain".
+
+ """
+
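+    # A minimal subclass sketch (illustrative, not part of this module):
+    # a GET request is dispatched to do_GET(), per the do_<command>
+    # convention described above, and the reply follows the response
+    # line / headers / blank line / data protocol.
+    #
+    #   class EchoHandler(BaseHTTPRequestHandler):
+    #       def do_GET(self):
+    #           self.send_response(200)
+    #           self.send_header("Content-Type", "text/plain")
+    #           self.end_headers()
+    #           self.wfile.write("path was %s\n" % self.path)
+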
+ # The Python system version, truncated to its first component.
+ sys_version = "Python/" + sys.version.split()[0]
+
+ # The server software version. You may want to override this.
+ # The format is multiple whitespace-separated strings,
+ # where each string is of the form name[/version].
+ server_version = "BaseHTTP/" + __version__
+
+ def parse_request(self):
+ """Parse a request (internal).
+
+ The request should be stored in self.raw_requestline; the results
+ are in self.command, self.path, self.request_version and
+ self.headers.
+
+ Return True for success, False for failure; on failure, an
+ error is sent back.
+
+ """
+ self.command = None # set in case of error on the first line
+ self.request_version = version = "HTTP/0.9" # Default
+ self.close_connection = 1
+ requestline = self.raw_requestline
+ if requestline[-2:] == '\r\n':
+ requestline = requestline[:-2]
+ elif requestline[-1:] == '\n':
+ requestline = requestline[:-1]
+ self.requestline = requestline
+ words = requestline.split()
+ if len(words) == 3:
+ [command, path, version] = words
+ if version[:5] != 'HTTP/':
+ self.send_error(400, "Bad request version (%r)" % version)
+ return False
+ try:
+ base_version_number = version.split('/', 1)[1]
+ version_number = base_version_number.split(".")
+ # RFC 2145 section 3.1 says there can be only one "." and
+ # - major and minor numbers MUST be treated as
+ # separate integers;
+ # - HTTP/2.4 is a lower version than HTTP/2.13, which in
+ # turn is lower than HTTP/12.3;
+ # - Leading zeros MUST be ignored by recipients.
+ if len(version_number) != 2:
+ raise ValueError
+ version_number = int(version_number[0]), int(version_number[1])
+ except (ValueError, IndexError):
+ self.send_error(400, "Bad request version (%r)" % version)
+ return False
+ if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
+ self.close_connection = 0
+ if version_number >= (2, 0):
+ self.send_error(505,
+ "Invalid HTTP Version (%s)" % base_version_number)
+ return False
+ elif len(words) == 2:
+ [command, path] = words
+ self.close_connection = 1
+ if command != 'GET':
+ self.send_error(400,
+ "Bad HTTP/0.9 request type (%r)" % command)
+ return False
+ elif not words:
+ return False
+ else:
+ self.send_error(400, "Bad request syntax (%r)" % requestline)
+ return False
+ self.command, self.path, self.request_version = command, path, version
+
+ # Examine the headers and look for a Connection directive
+ self.headers = self.MessageClass(self.rfile, 0)
+
+ conntype = self.headers.get('Connection', "")
+ if conntype.lower() == 'close':
+ self.close_connection = 1
+ elif (conntype.lower() == 'keep-alive' and
+ self.protocol_version >= "HTTP/1.1"):
+ self.close_connection = 0
+ return True
+
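+    # For example (illustrative values): after parsing the request line
+    # "GET /index.html HTTP/1.1", the handler has
+    #   self.command = 'GET'
+    #   self.path = '/index.html'
+    #   self.request_version = 'HTTP/1.1'
+    # and close_connection is 0 when protocol_version is "HTTP/1.1".
+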
+ def handle_one_request(self):
+ """Handle a single HTTP request.
+
+ You normally don't need to override this method; see the class
+ __doc__ string for information on how to handle specific HTTP
+ commands such as GET and POST.
+
+ """
+ self.raw_requestline = self.rfile.readline()
+ if not self.raw_requestline:
+ self.close_connection = 1
+ return
+ if not self.parse_request(): # An error code has been sent, just exit
+ return
+ mname = 'do_' + self.command
+ if not hasattr(self, mname):
+ self.send_error(501, "Unsupported method (%r)" % self.command)
+ return
+ method = getattr(self, mname)
+ method()
+
+ def handle(self):
+ """Handle multiple requests if necessary."""
+ self.close_connection = 1
+
+ self.handle_one_request()
+ while not self.close_connection:
+ self.handle_one_request()
+
+ def send_error(self, code, message=None):
+ """Send and log an error reply.
+
+ Arguments are the error code, and a detailed message.
+ The detailed message defaults to the short entry matching the
+ response code.
+
+ This sends an error response (so it must be called before any
+ output has been generated), logs the error, and finally sends
+ a piece of HTML explaining the error to the user.
+
+ """
+
+ try:
+ short, long = self.responses[code]
+ except KeyError:
+ short, long = '???', '???'
+ if message is None:
+ message = short
+ explain = long
+ self.log_error("code %d, message %s", code, message)
+ # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
+ content = (self.error_message_format %
+ {'code': code, 'message': _quote_html(message), 'explain': explain})
+ self.send_response(code, message)
+ self.send_header("Content-Type", "text/html")
+ self.send_header('Connection', 'close')
+ self.end_headers()
+ if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
+ self.wfile.write(content)
+
+ error_message_format = DEFAULT_ERROR_MESSAGE
+
+ def send_response(self, code, message=None):
+ """Send the response header and log the response code.
+
+ Also send two standard headers with the server software
+ version and the current date.
+
+ """
+ self.log_request(code)
+ if message is None:
+ if code in self.responses:
+ message = self.responses[code][0]
+ else:
+ message = ''
+ if self.request_version != 'HTTP/0.9':
+ self.wfile.write("%s %d %s\r\n" %
+ (self.protocol_version, code, message))
+ # print (self.protocol_version, code, message)
+ self.send_header('Server', self.version_string())
+ self.send_header('Date', self.date_time_string())
+
+ def send_header(self, keyword, value):
+ """Send a MIME header."""
+ if self.request_version != 'HTTP/0.9':
+ self.wfile.write("%s: %s\r\n" % (keyword, value))
+
+ if keyword.lower() == 'connection':
+ if value.lower() == 'close':
+ self.close_connection = 1
+ elif value.lower() == 'keep-alive':
+ self.close_connection = 0
+
+ def end_headers(self):
+ """Send the blank line ending the MIME headers."""
+ if self.request_version != 'HTTP/0.9':
+ self.wfile.write("\r\n")
+
+ def log_request(self, code='-', size='-'):
+ """Log an accepted request.
+
+ This is called by send_response().
+
+ """
+
+ self.log_message('"%s" %s %s',
+ self.requestline, str(code), str(size))
+
+ def log_error(self, *args):
+ """Log an error.
+
+ This is called when a request cannot be fulfilled. By
+ default it passes the message on to log_message().
+
+ Arguments are the same as for log_message().
+
+ XXX This should go to the separate error log.
+
+ """
+
+ self.log_message(*args)
+
+ def log_message(self, format, *args):
+ """Log an arbitrary message.
+
+ This is used by all other logging functions. Override
+ it if you have specific logging wishes.
+
+ The first argument, FORMAT, is a format string for the
+ message to be logged. If the format string contains
+ any % escapes requiring parameters, they should be
+ specified as subsequent arguments (it's just like
+ printf!).
+
+ The client host and current date/time are prefixed to
+ every message.
+
+ """
+
+ sys.stderr.write("%s - - [%s] %s\n" %
+ (self.address_string(),
+ self.log_date_time_string(),
+ format%args))
+
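+    # A typical log line (hypothetical values) as written to stderr:
+    #
+    #   client.example.com - - [08/Jun/2021 19:10:51] "GET / HTTP/1.1" 200 -
+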
+ def version_string(self):
+ """Return the server software version string."""
+ return self.server_version + ' ' + self.sys_version
+
+ def date_time_string(self, timestamp=None):
+ """Return the current date and time formatted for a message header."""
+ if timestamp is None:
+ timestamp = time.time()
+ year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
+ s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+ self.weekdayname[wd],
+ day, self.monthname[month], year,
+ hh, mm, ss)
+ return s
+
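+    # Example output, for a timestamp falling on Tue 8 Jun 2021, 23:10:51 UTC:
+    #
+    #   'Tue, 08 Jun 2021 23:10:51 GMT'
+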
+ def log_date_time_string(self):
+ """Return the current time formatted for logging."""
+ now = time.time()
+ year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+ s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+ day, self.monthname[month], year, hh, mm, ss)
+ return s
+
+ weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+ monthname = [None,
+ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+ def address_string(self):
+ """Return the client address formatted for logging.
+
+ This version looks up the full hostname using gethostbyaddr(),
+ and tries to find a name that contains at least one dot.
+
+ """
+
+ host, port = self.client_address[:2]
+ return socket.getfqdn(host)
+
+ # Essentially static class variables
+
+ # The version of the HTTP protocol we support.
+ # Set this to HTTP/1.1 to enable automatic keepalive
+ protocol_version = "HTTP/1.0"
+
+ # The Message-like class used to parse headers
+ MessageClass = mimetools.Message
+
+ # Table mapping response codes to messages; entries have the
+ # form {code: (shortmessage, longmessage)}.
+ # See RFC 2616.
+ responses = {
+ 100: ('Continue', 'Request received, please continue'),
+ 101: ('Switching Protocols',
+ 'Switching to new protocol; obey Upgrade header'),
+
+ 200: ('OK', 'Request fulfilled, document follows'),
+ 201: ('Created', 'Document created, URL follows'),
+ 202: ('Accepted',
+ 'Request accepted, processing continues off-line'),
+ 203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
+ 204: ('No Content', 'Request fulfilled, nothing follows'),
+ 205: ('Reset Content', 'Clear input form for further input.'),
+ 206: ('Partial Content', 'Partial content follows.'),
+
+ 300: ('Multiple Choices',
+ 'Object has several resources -- see URI list'),
+ 301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
+ 302: ('Found', 'Object moved temporarily -- see URI list'),
+ 303: ('See Other', 'Object moved -- see Method and URL list'),
+ 304: ('Not Modified',
+ 'Document has not changed since given time'),
+ 305: ('Use Proxy',
+ 'You must use proxy specified in Location to access this '
+ 'resource.'),
+ 307: ('Temporary Redirect',
+ 'Object moved temporarily -- see URI list'),
+
+ 400: ('Bad Request',
+ 'Bad request syntax or unsupported method'),
+ 401: ('Unauthorized',
+ 'No permission -- see authorization schemes'),
+ 402: ('Payment Required',
+ 'No payment -- see charging schemes'),
+ 403: ('Forbidden',
+ 'Request forbidden -- authorization will not help'),
+ 404: ('Not Found', 'Nothing matches the given URI'),
+ 405: ('Method Not Allowed',
+ 'Specified method is invalid for this server.'),
+ 406: ('Not Acceptable', 'URI not available in preferred format.'),
+ 407: ('Proxy Authentication Required', 'You must authenticate with '
+ 'this proxy before proceeding.'),
+ 408: ('Request Timeout', 'Request timed out; try again later.'),
+ 409: ('Conflict', 'Request conflict.'),
+ 410: ('Gone',
+ 'URI no longer exists and has been permanently removed.'),
+ 411: ('Length Required', 'Client must specify Content-Length.'),
+ 412: ('Precondition Failed', 'Precondition in headers is false.'),
+ 413: ('Request Entity Too Large', 'Entity is too large.'),
+ 414: ('Request-URI Too Long', 'URI is too long.'),
+ 415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
+ 416: ('Requested Range Not Satisfiable',
+ 'Cannot satisfy request range.'),
+ 417: ('Expectation Failed',
+ 'Expect condition could not be satisfied.'),
+
+ 500: ('Internal Server Error', 'Server got itself in trouble'),
+ 501: ('Not Implemented',
+ 'Server does not support this operation'),
+ 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
+ 503: ('Service Unavailable',
+ 'The server cannot process the request due to a high load'),
+ 504: ('Gateway Timeout',
+ 'The gateway server did not receive a timely response'),
+ 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
+ }
+
+
+def test(HandlerClass = BaseHTTPRequestHandler,
+ ServerClass = HTTPServer, protocol="HTTP/1.0"):
+ """Test the HTTP request handler class.
+
+ This runs an HTTP server on port 8000 (or the first command line
+ argument).
+
+ """
+
+ if sys.argv[1:]:
+ port = int(sys.argv[1])
+ else:
+ port = 8000
+ server_address = ('', port)
+
+ HandlerClass.protocol_version = protocol
+ httpd = ServerClass(server_address, HandlerClass)
+
+ sa = httpd.socket.getsockname()
+ print "Serving HTTP on", sa[0], "port", sa[1], "..."
+ httpd.serve_forever()
+
+
+if __name__ == '__main__':
+ test()
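+
+# Quick usage sketch (the port number is an example):
+#
+#   $ python BaseHTTPServer.py 8000
+#   Serving HTTP on 0.0.0.0 port 8000 ...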
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/Bastion.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,177 @@
+"""Bastionification utility.
+
+A bastion (for another object -- the 'original') is an object that has
+the same methods as the original but does not give access to its
+instance variables. Bastions have a number of uses, but the most
+obvious one is to provide code executing in restricted mode with a
+safe interface to an object implemented in unrestricted mode.
+
+The bastionification routine has an optional second argument which is
+a filter function. Only those methods for which the filter function
+(called with the method name as argument) returns true are accessible.
+The default filter returns true unless the method name begins
+with an underscore.
+
+There are a number of possible implementations of bastions. We use a
+'lazy' approach where the bastion's __getattr__() discipline does all
+the work for a particular method the first time it is used. This is
+usually fastest, especially if the user doesn't call all available
+methods. The retrieved methods are stored as instance variables of
+the bastion, so the overhead is only occurred on the first use of each
+method.
+
+Detail: the bastion class has a __repr__() discipline which includes
+the repr() of the original object. This is precomputed when the
+bastion is created.
+
+"""
+
+__all__ = ["BastionClass", "Bastion"]
+
+from types import MethodType
+
+
+class BastionClass:
+
+ """Helper class used by the Bastion() function.
+
+ You could subclass this and pass the subclass as the bastionclass
+ argument to the Bastion() function, as long as the constructor has
+ the same signature (a get() function and a name for the object).
+
+ """
+
+ def __init__(self, get, name):
+ """Constructor.
+
+ Arguments:
+
+ get - a function that gets the attribute value (by name)
+ name - a human-readable name for the original object
+ (suggestion: use repr(object))
+
+ """
+ self._get_ = get
+ self._name_ = name
+
+ def __repr__(self):
+ """Return a representation string.
+
+ This includes the name passed in to the constructor, so that
+ if you print the bastion during debugging, at least you have
+ some idea of what it is.
+
+ """
+ return "<Bastion for %s>" % self._name_
+
+ def __getattr__(self, name):
+ """Get an as-yet undefined attribute value.
+
+ This calls the get() function that was passed to the
+ constructor. The result is stored as an instance variable so
+ that the next time the same attribute is requested,
+ __getattr__() won't be invoked.
+
+ If the get() function raises an exception, this is simply
+ passed on -- exceptions are not cached.
+
+ """
+ attribute = self._get_(name)
+ self.__dict__[name] = attribute
+ return attribute
+
+
+def Bastion(object, filter = lambda name: name[:1] != '_',
+ name=None, bastionclass=BastionClass):
+ """Create a bastion for an object, using an optional filter.
+
+ See the Bastion module's documentation for background.
+
+ Arguments:
+
+ object - the original object
+ filter - a predicate that decides whether a function name is OK;
+ by default all names are OK that don't start with '_'
+ name - the name of the object; default repr(object)
+ bastionclass - class used to create the bastion; default BastionClass
+
+ """
+
+ raise RuntimeError, "This code is not secure in Python 2.2 and 2.3"
+
+ # Note: we define *two* ad-hoc functions here, get1 and get2.
+ # Both are intended to be called in the same way: get(name).
+ # It is clear that the real work (getting the attribute
+ # from the object and calling the filter) is done in get1.
+ # Why can't we pass get1 to the bastion? Because the user
+ # would be able to override the filter argument! With get2,
+ # overriding the default argument is no security loophole:
+ # all it does is call it.
+ # Also notice that we can't place the object and filter as
+ # instance variables on the bastion object itself, since
+ # the user has full access to all instance variables!
+
+ def get1(name, object=object, filter=filter):
+ """Internal function for Bastion(). See source comments."""
+ if filter(name):
+ attribute = getattr(object, name)
+ if type(attribute) == MethodType:
+ return attribute
+ raise AttributeError, name
+
+ def get2(name, get1=get1):
+ """Internal function for Bastion(). See source comments."""
+ return get1(name)
+
+ if name is None:
+ name = repr(object)
+ return bastionclass(get2, name)
+
+
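+# Intended usage (a sketch; note that Bastion() above raises RuntimeError
+# unconditionally, since the mechanism is not secure in Python 2.2/2.3):
+#
+#   o = SomeClass()      # any instance whose public methods should be exposed
+#   b = Bastion(o)
+#   b.method()           # allowed: name passes the default filter
+#   b.attribute          # AttributeError: instance variables are hidden
+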
+def _test():
+ """Test the Bastion() function."""
+ class Original:
+ def __init__(self):
+ self.sum = 0
+ def add(self, n):
+ self._add(n)
+ def _add(self, n):
+ self.sum = self.sum + n
+ def total(self):
+ return self.sum
+ o = Original()
+ b = Bastion(o)
+ testcode = """if 1:
+ b.add(81)
+ b.add(18)
+ print "b.total() =", b.total()
+ try:
+ print "b.sum =", b.sum,
+ except:
+ print "inaccessible"
+ else:
+ print "accessible"
+ try:
+ print "b._add =", b._add,
+ except:
+ print "inaccessible"
+ else:
+ print "accessible"
+ try:
+ print "b._get_.func_defaults =", map(type, b._get_.func_defaults),
+ except:
+ print "inaccessible"
+ else:
+ print "accessible"
+ \n"""
+ exec testcode
+ print '='*20, "Using rexec:", '='*20
+ import rexec
+ r = rexec.RExec()
+ m = r.add_module('__main__')
+ m.b = b
+ r.r_exec(testcode)
+
+
+if __name__ == '__main__':
+ _test()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/CGIHTTPServer.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,362 @@
+"""CGI-savvy HTTP Server.
+
+This module builds on SimpleHTTPServer by implementing GET and POST
+requests to cgi-bin scripts.
+
+If the os.fork() function is not present (e.g. on Windows),
+os.popen2() is used as a fallback, with slightly altered semantics; if
+that function is not present either (e.g. on Macintosh), only Python
+scripts are supported, and they are executed by the current process.
+
+In all cases, the implementation is intentionally naive -- all
+requests are executed synchronously.
+
+SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
+-- it may execute arbitrary Python code or external programs.
+
+Note that status code 200 is sent prior to execution of a CGI script, so
+scripts cannot send other status codes such as 302 (redirect).
+"""
+
+
+__version__ = "0.4"
+
+__all__ = ["CGIHTTPRequestHandler"]
+
+import os
+import sys
+import urllib
+import BaseHTTPServer
+import SimpleHTTPServer
+import select
+
+
+class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+
+ """Complete HTTP server with GET, HEAD and POST commands.
+
+ GET and HEAD also support running CGI scripts.
+
+ The POST command is *only* implemented for CGI scripts.
+
+ """
+
+ # Determine platform specifics
+ have_fork = hasattr(os, 'fork')
+ have_popen2 = hasattr(os, 'popen2')
+ have_popen3 = hasattr(os, 'popen3')
+
+ # Make rfile unbuffered -- we need to read one line and then pass
+ # the rest to a subprocess, so we can't use buffered input.
+ rbufsize = 0
+
+ def do_POST(self):
+ """Serve a POST request.
+
+ This is only implemented for CGI scripts.
+
+ """
+
+ if self.is_cgi():
+ self.run_cgi()
+ else:
+ self.send_error(501, "Can only POST to CGI scripts")
+
+ def send_head(self):
+ """Version of send_head that support CGI scripts"""
+ if self.is_cgi():
+ return self.run_cgi()
+ else:
+ return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
+
+ def is_cgi(self):
+ """Test whether self.path corresponds to a CGI script.
+
+        Return True if self.path requires running a CGI script,
+        False if not. As a side effect, set self.cgi_info to the
+        tuple (dir, rest); note that rest does not begin with a slash.
+
+ The default implementation tests whether the path
+ begins with one of the strings in the list
+ self.cgi_directories (and the next character is a '/'
+ or the end of the string).
+
+ """
+
+ path = self.path
+
+ for x in self.cgi_directories:
+ i = len(x)
+ if path[:i] == x and (not path[i:] or path[i] == '/'):
+ self.cgi_info = path[:i], path[i+1:]
+ return True
+ return False
+
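+    # For example (hypothetical path): with the default cgi_directories,
+    # a path of '/cgi-bin/script.py/extra' sets
+    #   self.cgi_info = ('/cgi-bin', 'script.py/extra')
+    # and is_cgi() returns True.
+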
+ cgi_directories = ['/cgi-bin', '/htbin']
+
+ def is_executable(self, path):
+ """Test whether argument path is an executable file."""
+ return executable(path)
+
+ def is_python(self, path):
+ """Test whether argument path is a Python script."""
+ head, tail = os.path.splitext(path)
+ return tail.lower() in (".py", ".pyw")
+
+ def run_cgi(self):
+ """Execute a CGI script."""
+ path = self.path
+ dir, rest = self.cgi_info
+
+ i = path.find('/', len(dir) + 1)
+ while i >= 0:
+ nextdir = path[:i]
+ nextrest = path[i+1:]
+
+ scriptdir = self.translate_path(nextdir)
+ if os.path.isdir(scriptdir):
+ dir, rest = nextdir, nextrest
+ i = path.find('/', len(dir) + 1)
+ else:
+ break
+
+ # find an explicit query string, if present.
+ i = rest.rfind('?')
+ if i >= 0:
+ rest, query = rest[:i], rest[i+1:]
+ else:
+ query = ''
+
+ # dissect the part after the directory name into a script name &
+ # a possible additional path, to be stored in PATH_INFO.
+ i = rest.find('/')
+ if i >= 0:
+ script, rest = rest[:i], rest[i:]
+ else:
+ script, rest = rest, ''
+
+ scriptname = dir + '/' + script
+ scriptfile = self.translate_path(scriptname)
+ if not os.path.exists(scriptfile):
+ self.send_error(404, "No such CGI script (%r)" % scriptname)
+ return
+ if not os.path.isfile(scriptfile):
+ self.send_error(403, "CGI script is not a plain file (%r)" %
+ scriptname)
+ return
+ ispy = self.is_python(scriptname)
+ if not ispy:
+ if not (self.have_fork or self.have_popen2 or self.have_popen3):
+ self.send_error(403, "CGI script is not a Python script (%r)" %
+ scriptname)
+ return
+ if not self.is_executable(scriptfile):
+ self.send_error(403, "CGI script is not executable (%r)" %
+ scriptname)
+ return
+
+ # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+ # XXX Much of the following could be prepared ahead of time!
+ env = {}
+ env['SERVER_SOFTWARE'] = self.version_string()
+ env['SERVER_NAME'] = self.server.server_name
+ env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+ env['SERVER_PROTOCOL'] = self.protocol_version
+ env['SERVER_PORT'] = str(self.server.server_port)
+ env['REQUEST_METHOD'] = self.command
+ uqrest = urllib.unquote(rest)
+ env['PATH_INFO'] = uqrest
+ env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+ env['SCRIPT_NAME'] = scriptname
+ if query:
+ env['QUERY_STRING'] = query
+ host = self.address_string()
+ if host != self.client_address[0]:
+ env['REMOTE_HOST'] = host
+ env['REMOTE_ADDR'] = self.client_address[0]
+ authorization = self.headers.getheader("authorization")
+ if authorization:
+ authorization = authorization.split()
+ if len(authorization) == 2:
+ import base64, binascii
+ env['AUTH_TYPE'] = authorization[0]
+ if authorization[0].lower() == "basic":
+ try:
+ authorization = base64.decodestring(authorization[1])
+ except binascii.Error:
+ pass
+ else:
+ authorization = authorization.split(':')
+ if len(authorization) == 2:
+ env['REMOTE_USER'] = authorization[0]
+ # XXX REMOTE_IDENT
+ if self.headers.typeheader is None:
+ env['CONTENT_TYPE'] = self.headers.type
+ else:
+ env['CONTENT_TYPE'] = self.headers.typeheader
+ length = self.headers.getheader('content-length')
+ if length:
+ env['CONTENT_LENGTH'] = length
+ accept = []
+ for line in self.headers.getallmatchingheaders('accept'):
+ if line[:1] in "\t\n\r ":
+ accept.append(line.strip())
+ else:
+ accept = accept + line[7:].split(',')
+ env['HTTP_ACCEPT'] = ','.join(accept)
+ ua = self.headers.getheader('user-agent')
+ if ua:
+ env['HTTP_USER_AGENT'] = ua
+ co = filter(None, self.headers.getheaders('cookie'))
+ if co:
+ env['HTTP_COOKIE'] = ', '.join(co)
+ # XXX Other HTTP_* headers
+ # Since we're setting the env in the parent, provide empty
+ # values to override previously set values
+ for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
+ 'HTTP_USER_AGENT', 'HTTP_COOKIE'):
+ env.setdefault(k, "")
+ os.environ.update(env)
+
+ self.send_response(200, "Script output follows")
+
+ decoded_query = query.replace('+', ' ')
+
+ if self.have_fork:
+ # Unix -- fork as we should
+ args = [script]
+ if '=' not in decoded_query:
+ args.append(decoded_query)
+ nobody = nobody_uid()
+ self.wfile.flush() # Always flush before forking
+ pid = os.fork()
+ if pid != 0:
+ # Parent
+ pid, sts = os.waitpid(pid, 0)
+ # throw away additional data [see bug #427345]
+ while select.select([self.rfile], [], [], 0)[0]:
+ if not self.rfile.read(1):
+ break
+ if sts:
+ self.log_error("CGI script exit status %#x", sts)
+ return
+ # Child
+ try:
+ try:
+ os.setuid(nobody)
+ except os.error:
+ pass
+ os.dup2(self.rfile.fileno(), 0)
+ os.dup2(self.wfile.fileno(), 1)
+ os.execve(scriptfile, args, os.environ)
+ except:
+ self.server.handle_error(self.request, self.client_address)
+ os._exit(127)
+
+ elif self.have_popen2 or self.have_popen3:
+ # Windows -- use popen2 or popen3 to create a subprocess
+ import shutil
+ if self.have_popen3:
+ popenx = os.popen3
+ else:
+ popenx = os.popen2
+ cmdline = scriptfile
+ if self.is_python(scriptfile):
+ interp = sys.executable
+ if interp.lower().endswith("w.exe"):
+ # On Windows, use python.exe, not pythonw.exe
+ interp = interp[:-5] + interp[-4:]
+ cmdline = "%s -u %s" % (interp, cmdline)
+ if '=' not in query and '"' not in query:
+ cmdline = '%s "%s"' % (cmdline, query)
+ self.log_message("command: %s", cmdline)
+ try:
+ nbytes = int(length)
+ except (TypeError, ValueError):
+ nbytes = 0
+ files = popenx(cmdline, 'b')
+ fi = files[0]
+ fo = files[1]
+ if self.have_popen3:
+ fe = files[2]
+ if self.command.lower() == "post" and nbytes > 0:
+ data = self.rfile.read(nbytes)
+ fi.write(data)
+ # throw away additional data [see bug #427345]
+ while select.select([self.rfile._sock], [], [], 0)[0]:
+ if not self.rfile._sock.recv(1):
+ break
+ fi.close()
+ shutil.copyfileobj(fo, self.wfile)
+ if self.have_popen3:
+ errors = fe.read()
+ fe.close()
+ if errors:
+ self.log_error('%s', errors)
+ sts = fo.close()
+ if sts:
+ self.log_error("CGI script exit status %#x", sts)
+ else:
+ self.log_message("CGI script exited OK")
+
+ else:
+ # Other O.S. -- execute script in this process
+ save_argv = sys.argv
+ save_stdin = sys.stdin
+ save_stdout = sys.stdout
+ save_stderr = sys.stderr
+ try:
+ save_cwd = os.getcwd()
+ try:
+ sys.argv = [scriptfile]
+ if '=' not in decoded_query:
+ sys.argv.append(decoded_query)
+ sys.stdout = self.wfile
+ sys.stdin = self.rfile
+ execfile(scriptfile, {"__name__": "__main__"})
+ finally:
+ sys.argv = save_argv
+ sys.stdin = save_stdin
+ sys.stdout = save_stdout
+ sys.stderr = save_stderr
+ os.chdir(save_cwd)
+ except SystemExit, sts:
+ self.log_error("CGI script exit status %s", str(sts))
+ else:
+ self.log_message("CGI script exited OK")
+
+
+nobody = None
+
+def nobody_uid():
+ """Internal routine to get nobody's uid"""
+ global nobody
+ if nobody:
+ return nobody
+ try:
+ import pwd
+ except ImportError:
+ return -1
+ try:
+ nobody = pwd.getpwnam('nobody')[2]
+ except KeyError:
+ nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
+ return nobody
+
+
+def executable(path):
+ """Test for executable file."""
+ try:
+ st = os.stat(path)
+ except os.error:
+ return False
+ return st.st_mode & 0111 != 0
+
+
+def test(HandlerClass = CGIHTTPRequestHandler,
+ ServerClass = BaseHTTPServer.HTTPServer):
+ SimpleHTTPServer.test(HandlerClass, ServerClass)
+
+
+if __name__ == '__main__':
+ test()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/ConfigParser.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,640 @@
+"""Configuration file parser.
+
+A setup file consists of sections, led by a "[section]" header,
+and followed by "name: value" entries, with continuations and such in
+the style of RFC 822.
+
+The option values can contain format strings which refer to other values in
+the same section, or values in a special [DEFAULT] section.
+
+For example:
+
+ something: %(dir)s/whatever
+
+would resolve the "%(dir)s" to the value of dir. All reference
+expansions are done late, on demand.
+
+Intrinsic defaults can be specified by passing them into the
+ConfigParser constructor as a dictionary.
+
+class:
+
+ConfigParser -- responsible for parsing a list of
+ configuration files, and managing the parsed database.
+
+ methods:
+
+ __init__(defaults=None)
+ create the parser and specify a dictionary of intrinsic defaults. The
+ keys must be strings, the values must be appropriate for %()s string
+ interpolation. Note that `__name__' is always an intrinsic default;
+ its value is the section's name.
+
+ sections()
+ return all the configuration section names, sans DEFAULT
+
+ has_section(section)
+ return whether the given section exists
+
+ has_option(section, option)
+ return whether the given option exists in the given section
+
+ options(section)
+ return list of configuration options for the named section
+
+ read(filenames)
+ read and parse the list of named configuration files, given by
+ name. A single filename is also allowed. Non-existing files
+ are ignored. Return list of successfully read files.
+
+ readfp(fp, filename=None)
+ read and parse one configuration file, given as a file object.
+ The filename defaults to fp.name; it is only used in error
+ messages (if fp has no `name' attribute, the string `<???>' is used).
+
+ get(section, option, raw=False, vars=None)
+ return a string value for the named option. All % interpolations are
+ expanded in the return values, based on the defaults passed into the
+ constructor and the DEFAULT section. Additional substitutions may be
+ provided using the `vars' argument, which must be a dictionary whose
+ contents override any pre-existing defaults.
+
+ getint(section, options)
+ like get(), but convert value to an integer
+
+ getfloat(section, options)
+ like get(), but convert value to a float
+
+ getboolean(section, options)
+ like get(), but convert value to a boolean (currently case
+ insensitively defined as 0, false, no, off for False, and 1, true,
+ yes, on for True). Returns False or True.
+
+ items(section, raw=False, vars=None)
+ return a list of tuples with (name, value) for each option
+ in the section.
+
+ remove_section(section)
+ remove the given file section and all its options
+
+ remove_option(section, option)
+ remove the given option from the given section
+
+ set(section, option, value)
+ set the given option
+
+ write(fp)
+ write the configuration state in .ini format
+"""
+
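+# A short usage sketch (file and option names are hypothetical):
+#
+#   cp = ConfigParser()
+#   cp.read("app.ini")             # say app.ini contains:
+#                                  #   [paths]
+#                                  #   dir: /tmp
+#                                  #   something: %(dir)s/whatever
+#   cp.get("paths", "something")   # -> '/tmp/whatever'
+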
+import re
+
+__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
+ "InterpolationError", "InterpolationDepthError",
+ "InterpolationSyntaxError", "ParsingError",
+ "MissingSectionHeaderError",
+ "ConfigParser", "SafeConfigParser", "RawConfigParser",
+ "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
+
+DEFAULTSECT = "DEFAULT"
+
+MAX_INTERPOLATION_DEPTH = 10
+
+
+
+# exception classes
+class Error(Exception):
+ """Base class for ConfigParser exceptions."""
+
+ def __init__(self, msg=''):
+ self.message = msg
+ Exception.__init__(self, msg)
+
+ def __repr__(self):
+ return self.message
+
+ __str__ = __repr__
+
+class NoSectionError(Error):
+ """Raised when no section matches a requested option."""
+
+ def __init__(self, section):
+ Error.__init__(self, 'No section: %r' % (section,))
+ self.section = section
+
+class DuplicateSectionError(Error):
+ """Raised when a section is multiply-created."""
+
+ def __init__(self, section):
+ Error.__init__(self, "Section %r already exists" % section)
+ self.section = section
+
+class NoOptionError(Error):
+ """A requested option was not found."""
+
+ def __init__(self, option, section):
+ Error.__init__(self, "No option %r in section: %r" %
+ (option, section))
+ self.option = option
+ self.section = section
+
+class InterpolationError(Error):
+ """Base class for interpolation-related exceptions."""
+
+ def __init__(self, option, section, msg):
+ Error.__init__(self, msg)
+ self.option = option
+ self.section = section
+
+class InterpolationMissingOptionError(InterpolationError):
+ """A string substitution required a setting which was not available."""
+
+ def __init__(self, option, section, rawval, reference):
+ msg = ("Bad value substitution:\n"
+ "\tsection: [%s]\n"
+ "\toption : %s\n"
+ "\tkey : %s\n"
+ "\trawval : %s\n"
+ % (section, option, reference, rawval))
+ InterpolationError.__init__(self, option, section, msg)
+ self.reference = reference
+
+class InterpolationSyntaxError(InterpolationError):
+ """Raised when the source text into which substitutions are made
+ does not conform to the required syntax."""
+
+class InterpolationDepthError(InterpolationError):
+ """Raised when substitutions are nested too deeply."""
+
+ def __init__(self, option, section, rawval):
+ msg = ("Value interpolation too deeply recursive:\n"
+ "\tsection: [%s]\n"
+ "\toption : %s\n"
+ "\trawval : %s\n"
+ % (section, option, rawval))
+ InterpolationError.__init__(self, option, section, msg)
+
+class ParsingError(Error):
+ """Raised when a configuration file does not follow legal syntax."""
+
+ def __init__(self, filename):
+ Error.__init__(self, 'File contains parsing errors: %s' % filename)
+ self.filename = filename
+ self.errors = []
+
+ def append(self, lineno, line):
+ self.errors.append((lineno, line))
+ self.message += '\n\t[line %2d]: %s' % (lineno, line)
+
+class MissingSectionHeaderError(ParsingError):
+ """Raised when a key-value pair is found before any section header."""
+
+ def __init__(self, filename, lineno, line):
+ Error.__init__(
+ self,
+ 'File contains no section headers.\nfile: %s, line: %d\n%r' %
+ (filename, lineno, line))
+ self.filename = filename
+ self.lineno = lineno
+ self.line = line
+
+
+
+class RawConfigParser:
+ def __init__(self, defaults=None):
+ self._sections = {}
+ self._defaults = {}
+ if defaults:
+ for key, value in defaults.items():
+ self._defaults[self.optionxform(key)] = value
+
+ def defaults(self):
+ return self._defaults
+
+ def sections(self):
+ """Return a list of section names, excluding [DEFAULT]"""
+ # self._sections will never have [DEFAULT] in it
+ return self._sections.keys()
+
+ def add_section(self, section):
+ """Create a new section in the configuration.
+
+ Raise DuplicateSectionError if a section by the specified name
+ already exists.
+ """
+ if section in self._sections:
+ raise DuplicateSectionError(section)
+ self._sections[section] = {}
+
+ def has_section(self, section):
+ """Indicate whether the named section is present in the configuration.
+
+ The DEFAULT section is not acknowledged.
+ """
+ return section in self._sections
+
+ def options(self, section):
+ """Return a list of option names for the given section name."""
+ try:
+ opts = self._sections[section].copy()
+ except KeyError:
+ raise NoSectionError(section)
+ opts.update(self._defaults)
+ if '__name__' in opts:
+ del opts['__name__']
+ return opts.keys()
+
+ def read(self, filenames):
+ """Read and parse a filename or a list of filenames.
+
+ Files that cannot be opened are silently ignored; this is
+ designed so that you can specify a list of potential
+ configuration file locations (e.g. current directory, user's
+ home directory, systemwide directory), and all existing
+ configuration files in the list will be read. A single
+ filename may also be given.
+
+ Return list of successfully read files.
+ """
+ if isinstance(filenames, basestring):
+ filenames = [filenames]
+ read_ok = []
+ for filename in filenames:
+ try:
+ fp = open(filename)
+ except IOError:
+ continue
+ self._read(fp, filename)
+ fp.close()
+ read_ok.append(filename)
+ return read_ok
+
+ def readfp(self, fp, filename=None):
+ """Like read() but the argument must be a file-like object.
+
+ The `fp' argument must have a `readline' method. Optional
+ second argument is the `filename', which if not given, is
+ taken from fp.name. If fp has no `name' attribute, `<???>' is
+ used.
+
+ """
+ if filename is None:
+ try:
+ filename = fp.name
+ except AttributeError:
+ filename = '<???>'
+ self._read(fp, filename)
+
+ def get(self, section, option):
+ opt = self.optionxform(option)
+ if section not in self._sections:
+ if section != DEFAULTSECT:
+ raise NoSectionError(section)
+ if opt in self._defaults:
+ return self._defaults[opt]
+ else:
+ raise NoOptionError(option, section)
+ elif opt in self._sections[section]:
+ return self._sections[section][opt]
+ elif opt in self._defaults:
+ return self._defaults[opt]
+ else:
+ raise NoOptionError(option, section)
+
+ def items(self, section):
+ try:
+ d2 = self._sections[section]
+ except KeyError:
+ if section != DEFAULTSECT:
+ raise NoSectionError(section)
+ d2 = {}
+ d = self._defaults.copy()
+ d.update(d2)
+ if "__name__" in d:
+ del d["__name__"]
+ return d.items()
+
+ def _get(self, section, conv, option):
+ return conv(self.get(section, option))
+
+ def getint(self, section, option):
+ return self._get(section, int, option)
+
+ def getfloat(self, section, option):
+ return self._get(section, float, option)
+
+ _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
+ '0': False, 'no': False, 'false': False, 'off': False}
+
+ def getboolean(self, section, option):
+ v = self.get(section, option)
+ if v.lower() not in self._boolean_states:
+ raise ValueError, 'Not a boolean: %s' % v
+ return self._boolean_states[v.lower()]
+
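+    # For example (hypothetical option): a raw value of 'Yes' or 'on'
+    # yields True; '0' or 'false' yields False; anything else raises
+    # ValueError.
+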
+ def optionxform(self, optionstr):
+ return optionstr.lower()
+
+ def has_option(self, section, option):
+ """Check for the existence of a given option in a given section."""
+ if not section or section == DEFAULTSECT:
+ option = self.optionxform(option)
+ return option in self._defaults
+ elif section not in self._sections:
+ return False
+ else:
+ option = self.optionxform(option)
+ return (option in self._sections[section]
+ or option in self._defaults)
+
+ def set(self, section, option, value):
+ """Set an option."""
+ if not section or section == DEFAULTSECT:
+ sectdict = self._defaults
+ else:
+ try:
+ sectdict = self._sections[section]
+ except KeyError:
+ raise NoSectionError(section)
+ sectdict[self.optionxform(option)] = value
+
+ def write(self, fp):
+ """Write an .ini-format representation of the configuration state."""
+ if self._defaults:
+ fp.write("[%s]\n" % DEFAULTSECT)
+ for (key, value) in self._defaults.items():
+ fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
+ fp.write("\n")
+ for section in self._sections:
+ fp.write("[%s]\n" % section)
+ for (key, value) in self._sections[section].items():
+ if key != "__name__":
+ fp.write("%s = %s\n" %
+ (key, str(value).replace('\n', '\n\t')))
+ fp.write("\n")
+
+ def remove_option(self, section, option):
+ """Remove an option."""
+ if not section or section == DEFAULTSECT:
+ sectdict = self._defaults
+ else:
+ try:
+ sectdict = self._sections[section]
+ except KeyError:
+ raise NoSectionError(section)
+ option = self.optionxform(option)
+ existed = option in sectdict
+ if existed:
+ del sectdict[option]
+ return existed
+
+ def remove_section(self, section):
+ """Remove a file section."""
+ existed = section in self._sections
+ if existed:
+ del self._sections[section]
+ return existed
+
+ #
+ # Regular expressions for parsing section headers and options.
+ #
+ SECTCRE = re.compile(
+ r'\[' # [
+ r'(?P<header>[^]]+)' # very permissive!
+ r'\]' # ]
+ )
+ OPTCRE = re.compile(
+ r'(?P<option>[^:=\s][^:=]*)' # very permissive!
+ r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
+ # followed by separator
+ # (either : or =), followed
+ # by any # space/tab
+ r'(?P<value>.*)$' # everything up to eol
+ )
+
+ def _read(self, fp, fpname):
+ """Parse a sectioned setup file.
+
+        The sections in the setup file contain a title line at the top,
+ indicated by a name in square brackets (`[]'), plus key/value
+ options lines, indicated by `name: value' format lines.
+ Continuations are represented by an embedded newline then
+ leading whitespace. Blank lines, lines beginning with a '#',
+ and just about everything else are ignored.
+ """
+ cursect = None # None, or a dictionary
+ optname = None
+ lineno = 0
+ e = None # None, or an exception
+ while True:
+ line = fp.readline()
+ if not line:
+ break
+ lineno = lineno + 1
+ # comment or blank line?
+ if line.strip() == '' or line[0] in '#;':
+ continue
+ if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
+ # no leading whitespace
+ continue
+ # continuation line?
+ if line[0].isspace() and cursect is not None and optname:
+ value = line.strip()
+ if value:
+ cursect[optname] = "%s\n%s" % (cursect[optname], value)
+ # a section header or option header?
+ else:
+ # is it a section header?
+ mo = self.SECTCRE.match(line)
+ if mo:
+ sectname = mo.group('header')
+ if sectname in self._sections:
+ cursect = self._sections[sectname]
+ elif sectname == DEFAULTSECT:
+ cursect = self._defaults
+ else:
+ cursect = {'__name__': sectname}
+ self._sections[sectname] = cursect
+ # So sections can't start with a continuation line
+ optname = None
+ # no section header in the file?
+ elif cursect is None:
+ raise MissingSectionHeaderError(fpname, lineno, line)
+ # an option line?
+ else:
+ mo = self.OPTCRE.match(line)
+ if mo:
+ optname, vi, optval = mo.group('option', 'vi', 'value')
+ if vi in ('=', ':') and ';' in optval:
+ # ';' is a comment delimiter only if it follows
+ # a spacing character
+ pos = optval.find(';')
+ if pos != -1 and optval[pos-1].isspace():
+ optval = optval[:pos]
+ optval = optval.strip()
+ # allow empty values
+ if optval == '""':
+ optval = ''
+ optname = self.optionxform(optname.rstrip())
+ cursect[optname] = optval
+ else:
+ # a non-fatal parsing error occurred. set up the
+ # exception but keep going. the exception will be
+ # raised at the end of the file and will contain a
+ # list of all bogus lines
+ if not e:
+ e = ParsingError(fpname)
+ e.append(lineno, repr(line))
+ # if any parsing errors occurred, raise an exception
+ if e:
+ raise e
+
+
+class ConfigParser(RawConfigParser):
+
+ def get(self, section, option, raw=False, vars=None):
+ """Get an option value for a given section.
+
+ All % interpolations are expanded in the return values, based on the
+ defaults passed into the constructor, unless the optional argument
+ `raw' is true. Additional substitutions may be provided using the
+ `vars' argument, which must be a dictionary whose contents overrides
+ any pre-existing defaults.
+
+ The section DEFAULT is special.
+ """
+ d = self._defaults.copy()
+ try:
+ d.update(self._sections[section])
+ except KeyError:
+ if section != DEFAULTSECT:
+ raise NoSectionError(section)
+ # Update with the entry specific variables
+ if vars:
+ for key, value in vars.items():
+ d[self.optionxform(key)] = value
+ option = self.optionxform(option)
+ try:
+ value = d[option]
+ except KeyError:
+ raise NoOptionError(option, section)
+
+ if raw:
+ return value
+ else:
+ return self._interpolate(section, option, value, d)
+
+ def items(self, section, raw=False, vars=None):
+ """Return a list of tuples with (name, value) for each option
+ in the section.
+
+ All % interpolations are expanded in the return values, based on the
+ defaults passed into the constructor, unless the optional argument
+ `raw' is true. Additional substitutions may be provided using the
+ `vars' argument, which must be a dictionary whose contents overrides
+ any pre-existing defaults.
+
+ The section DEFAULT is special.
+ """
+ d = self._defaults.copy()
+ try:
+ d.update(self._sections[section])
+ except KeyError:
+ if section != DEFAULTSECT:
+ raise NoSectionError(section)
+ # Update with the entry specific variables
+ if vars:
+ for key, value in vars.items():
+ d[self.optionxform(key)] = value
+ options = d.keys()
+ if "__name__" in options:
+ options.remove("__name__")
+ if raw:
+ return [(option, d[option])
+ for option in options]
+ else:
+ return [(option, self._interpolate(section, option, d[option], d))
+ for option in options]
+
+ def _interpolate(self, section, option, rawval, vars):
+ # do the string interpolation
+ value = rawval
+ depth = MAX_INTERPOLATION_DEPTH
+ while depth: # Loop through this until it's done
+ depth -= 1
+ if "%(" in value:
+ value = self._KEYCRE.sub(self._interpolation_replace, value)
+ try:
+ value = value % vars
+ except KeyError, e:
+ raise InterpolationMissingOptionError(
+ option, section, rawval, e[0])
+ else:
+ break
+ if "%(" in value:
+ raise InterpolationDepthError(option, section, rawval)
+ return value
+
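+    # For example (hypothetical values): with vars = {'dir': '/tmp'},
+    # a rawval of '%(dir)s/whatever' interpolates to '/tmp/whatever'.
+    # Chains of references are followed for at most
+    # MAX_INTERPOLATION_DEPTH rounds before InterpolationDepthError.
+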
+ _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
+
+ def _interpolation_replace(self, match):
+ s = match.group(1)
+ if s is None:
+ return match.group()
+ else:
+ return "%%(%s)s" % self.optionxform(s)
+
+
+class SafeConfigParser(ConfigParser):
+
+ def _interpolate(self, section, option, rawval, vars):
+ # do the string interpolation
+ L = []
+ self._interpolate_some(option, L, rawval, section, vars, 1)
+ return ''.join(L)
+
+ _interpvar_match = re.compile(r"%\(([^)]+)\)s").match
+
+ def _interpolate_some(self, option, accum, rest, section, map, depth):
+ if depth > MAX_INTERPOLATION_DEPTH:
+ raise InterpolationDepthError(option, section, rest)
+ while rest:
+ p = rest.find("%")
+ if p < 0:
+ accum.append(rest)
+ return
+ if p > 0:
+ accum.append(rest[:p])
+ rest = rest[p:]
+ # p is no longer used
+ c = rest[1:2]
+ if c == "%":
+ accum.append("%")
+ rest = rest[2:]
+ elif c == "(":
+ m = self._interpvar_match(rest)
+ if m is None:
+ raise InterpolationSyntaxError(option, section,
+ "bad interpolation variable reference %r" % rest)
+ var = self.optionxform(m.group(1))
+ rest = rest[m.end():]
+ try:
+ v = map[var]
+ except KeyError:
+ raise InterpolationMissingOptionError(
+ option, section, rest, var)
+ if "%" in v:
+ self._interpolate_some(option, accum, v,
+ section, map, depth + 1)
+ else:
+ accum.append(v)
+ else:
+ raise InterpolationSyntaxError(
+ option, section,
+ "'%%' must be followed by '%%' or '(', found: %r" % (rest,))
+
+ def set(self, section, option, value):
+ """Set an option. Extend ConfigParser.set: check for string values."""
+ if not isinstance(value, basestring):
+ raise TypeError("option values must be strings")
+ ConfigParser.set(self, section, option, value)
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/Cookie.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,746 @@
+#!/usr/bin/env python
+#
+
+####
+# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
+#
+# All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software
+# and its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Timothy O'Malley not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
+# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+####
+#
+# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
+# by Timothy O'Malley <timo@alum.mit.edu>
+#
+# Cookie.py is a Python module for the handling of HTTP
+# cookies as a Python dictionary. See RFC 2109 for more
+# information on cookies.
+#
+# The original idea to treat Cookies as a dictionary came from
+# Dave Mitchell (davem@magnet.com) in 1995, when he released the
+# first version of nscookie.py.
+#
+####
+
+r"""
+Here's a sample session to show how to use this module.
+At the moment, this is the only documentation.
+
+The Basics
+----------
+
+Importing is easy..
+
+ >>> import Cookie
+
+Most of the time you start by creating a cookie. Cookies come in
+three flavors, each with slightly different encoding semantics, but
+more on that later.
+
+ >>> C = Cookie.SimpleCookie()
+ >>> C = Cookie.SerialCookie()
+ >>> C = Cookie.SmartCookie()
+
+[Note: Long-time users of Cookie.py will remember using
+Cookie.Cookie() to create a Cookie object. Although deprecated, it
+is still supported by the code. See the Backward Compatibility notes
+for more information.]
+
+Once you've created your Cookie, you can add values just as if it were
+a dictionary.
+
+ >>> C = Cookie.SmartCookie()
+ >>> C["fig"] = "newton"
+ >>> C["sugar"] = "wafer"
+ >>> C.output()
+ 'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
+
+Notice that the printable representation of a Cookie is the
+appropriate format for a Set-Cookie: header. This is the
+default behavior. You can change the header and printed
+attributes by using the .output() function
+
+ >>> C = Cookie.SmartCookie()
+ >>> C["rocky"] = "road"
+ >>> C["rocky"]["path"] = "/cookie"
+ >>> print C.output(header="Cookie:")
+ Cookie: rocky=road; Path=/cookie
+ >>> print C.output(attrs=[], header="Cookie:")
+ Cookie: rocky=road
+
+The load() method of a Cookie extracts cookies from a string. In a
+CGI script, you would use this method to extract the cookies from the
+HTTP_COOKIE environment variable.
+
+ >>> C = Cookie.SmartCookie()
+ >>> C.load("chips=ahoy; vienna=finger")
+ >>> C.output()
+ 'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
+
+The load() method is darn-tootin smart about identifying cookies
+within a string. Escaped quotation marks, nested semicolons, and other
+such trickeries do not confuse it.
+
+ >>> C = Cookie.SmartCookie()
+ >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
+ >>> print C
+ Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
+
+Each element of the Cookie also supports all of the RFC 2109
+Cookie attributes. Here's an example which sets the Path
+attribute.
+
+ >>> C = Cookie.SmartCookie()
+ >>> C["oreo"] = "doublestuff"
+ >>> C["oreo"]["path"] = "/"
+ >>> print C
+ Set-Cookie: oreo=doublestuff; Path=/
+
+Each dictionary element has a 'value' attribute, which gives you
+back the value associated with the key.
+
+ >>> C = Cookie.SmartCookie()
+ >>> C["twix"] = "none for you"
+ >>> C["twix"].value
+ 'none for you'
+
+
+A Bit More Advanced
+-------------------
+
+As mentioned before, there are three different flavors of Cookie
+objects, each with different encoding/decoding semantics. This
+section briefly discusses the differences.
+
+SimpleCookie
+
+The SimpleCookie expects that all values should be standard strings.
+Just to be sure, SimpleCookie invokes the str() builtin to convert
+the value to a string when the values are set dictionary-style.
+
+ >>> C = Cookie.SimpleCookie()
+ >>> C["number"] = 7
+ >>> C["string"] = "seven"
+ >>> C["number"].value
+ '7'
+ >>> C["string"].value
+ 'seven'
+ >>> C.output()
+ 'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
+
+
+SerialCookie
+
+The SerialCookie expects that all values should be serialized using
+cPickle (or pickle, if cPickle isn't available). As a result of
+serializing, SerialCookie can save almost any Python object to a
+value, and recover the exact same object when the cookie has been
+returned. (SerialCookie can yield some strange-looking cookie
+values, however.)
+
+ >>> C = Cookie.SerialCookie()
+ >>> C["number"] = 7
+ >>> C["string"] = "seven"
+ >>> C["number"].value
+ 7
+ >>> C["string"].value
+ 'seven'
+ >>> C.output()
+ 'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
+
+Be warned, however: if SerialCookie cannot de-serialize a value (because
+it isn't a valid pickled object), IT WILL RAISE AN EXCEPTION.
+
+
+SmartCookie
+
+The SmartCookie combines aspects of each of the other two flavors.
+When setting a value in dictionary fashion, the SmartCookie will
+serialize (a la cPickle) the value *if and only if* it isn't a
+Python string. String objects are *not* serialized. Similarly,
+when the load() method parses out values, it attempts to de-serialize
+the value. If it fails, then it falls back to treating the value
+as a string.
+
+ >>> C = Cookie.SmartCookie()
+ >>> C["number"] = 7
+ >>> C["string"] = "seven"
+ >>> C["number"].value
+ 7
+ >>> C["string"].value
+ 'seven'
+ >>> C.output()
+ 'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
+
+
+Backwards Compatibility
+-----------------------
+
+In order to keep compatibility with earlier versions of Cookie.py,
+it is still possible to use Cookie.Cookie() to create a Cookie. In
+fact, this simply returns a SmartCookie.
+
+ >>> C = Cookie.Cookie()
+ >>> print C.__class__.__name__
+ SmartCookie
+
+
+Finis.
+""" #"
+# ^
+# |----helps out font-lock
+
+#
+# Import our required modules
+#
+import string
+
+try:
+ from cPickle import dumps, loads
+except ImportError:
+ from pickle import dumps, loads
+
+import re, warnings
+
+__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
+ "SmartCookie","Cookie"]
+
+_nulljoin = ''.join
+_semispacejoin = '; '.join
+_spacejoin = ' '.join
+
+#
+# Define an exception visible to external modules
+#
+class CookieError(Exception):
+ pass
+
+
+# These quoting routines conform to the RFC2109 specification, which in
+# turn references the character definitions from RFC2068. They provide
+# a two-way quoting algorithm. Any non-text character is translated
+# into a 4 character sequence: a backslash followed by the
+# three-digit octal equivalent of the character. Any '\' or '"' is
+# quoted with a preceding '\' (backslash).
+#
+# These are taken from RFC2068 and RFC2109.
+# _LegalChars is the list of chars which don't require "'s
+# _Translator hash-table for fast quoting
+#
+_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
+_Translator = {
+ '\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
+ '\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
+ '\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
+ '\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
+ '\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
+ '\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
+ '\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
+ '\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
+ '\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
+ '\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
+ '\036' : '\\036', '\037' : '\\037',
+
+ '"' : '\\"', '\\' : '\\\\',
+
+ '\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
+ '\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
+ '\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
+ '\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
+ '\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
+ '\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
+ '\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
+ '\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
+ '\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
+ '\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
+ '\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
+ '\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
+ '\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
+ '\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
+ '\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
+ '\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
+ '\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
+ '\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
+ '\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
+ '\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
+ '\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
+ '\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
+ '\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
+ '\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
+ '\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
+ '\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
+ '\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
+ '\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
+ '\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
+ '\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
+ '\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
+ '\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
+ '\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
+ '\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
+ '\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
+ '\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
+ '\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
+ '\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
+ '\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
+ '\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
+ '\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
+ '\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
+ '\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
+ }
+
+_idmap = ''.join(chr(x) for x in xrange(256))
+
+def _quote(str, LegalChars=_LegalChars,
+ idmap=_idmap, translate=string.translate):
+ #
+ # If the string does not need to be double-quoted,
+    # then just return the string. Otherwise, surround
+    # the string in doublequotes and quote (with a \)
+    # any special characters.
+ #
+ if "" == translate(str, idmap, LegalChars):
+ return str
+ else:
+ return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
+# end _quote
+
+
+_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
+_QuotePatt = re.compile(r"[\\].")
+
+def _unquote(str):
+ # If there aren't any doublequotes,
+ # then there can't be any special characters. See RFC 2109.
+ if len(str) < 2:
+ return str
+ if str[0] != '"' or str[-1] != '"':
+ return str
+
+ # We have to assume that we must decode this string.
+ # Down to work.
+
+ # Remove the "s
+ str = str[1:-1]
+
+ # Check for special sequences. Examples:
+ # \012 --> \n
+ # \" --> "
+ #
+ i = 0
+ n = len(str)
+ res = []
+ while 0 <= i < n:
+ Omatch = _OctalPatt.search(str, i)
+ Qmatch = _QuotePatt.search(str, i)
+ if not Omatch and not Qmatch: # Neither matched
+ res.append(str[i:])
+ break
+ # else:
+ j = k = -1
+ if Omatch: j = Omatch.start(0)
+ if Qmatch: k = Qmatch.start(0)
+ if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
+ res.append(str[i:k])
+ res.append(str[k+1])
+ i = k+2
+ else: # OctalPatt matched
+ res.append(str[i:j])
+ res.append( chr( int(str[j+1:j+4], 8) ) )
+ i = j+4
+ return _nulljoin(res)
+# end _unquote
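+
+# A round-trip sketch (illustrative; not part of the original module):
+# values made up only of _LegalChars pass through _quote() unchanged,
+# anything else comes back double-quoted with backslash/octal escapes,
+# and _unquote() reverses the transformation.
+#
+#     >>> _quote("fig")
+#     'fig'
+#     >>> _quote('say "hi"')
+#     '"say \\"hi\\""'
+#     >>> _unquote(_quote('say "hi"'))
+#     'say "hi"'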
+
+# The _getdate() routine is used to set the expiration time in
+# the cookie's HTTP header. By default, _getdate() returns the
+# current time in the appropriate "expires" format for a
+# Set-Cookie header. The one optional argument is an offset from
+# now, in seconds. For example, an offset of -3600 means "one hour ago".
+# The offset may be a floating point number.
+#
+
+_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+_monthname = [None,
+ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
+ from time import gmtime, time
+ now = time()
+ year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
+ return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
+ (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
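+
+# For example (illustrative; the exact output depends on the clock):
+#
+#     >>> _getdate(3600)          # one hour from now
+#     'Tue, 08-Jun-2021 23:10:51 GMT'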
+
+
+#
+# A class to hold ONE key,value pair.
+# In a cookie, each such pair may have several attributes,
+# so this class is used to keep the attributes associated
+# with the appropriate key,value pair.
+# This class also includes a coded_value attribute, which
+# is used to hold the network representation of the
+# value. This is most useful when Python objects are
+# pickled for network transit.
+#
+
+class Morsel(dict):
+ # RFC 2109 lists these attributes as reserved:
+ # path comment domain
+ # max-age secure version
+ #
+ # For historical reasons, these attributes are also reserved:
+ # expires
+ #
+ # This dictionary provides a mapping from the lowercase
+ # variant on the left to the appropriate traditional
+ # formatting on the right.
+ _reserved = { "expires" : "expires",
+ "path" : "Path",
+ "comment" : "Comment",
+ "domain" : "Domain",
+ "max-age" : "Max-Age",
+ "secure" : "secure",
+ "version" : "Version",
+ }
+
+ def __init__(self):
+ # Set defaults
+ self.key = self.value = self.coded_value = None
+
+ # Set default attributes
+ for K in self._reserved:
+ dict.__setitem__(self, K, "")
+ # end __init__
+
+ def __setitem__(self, K, V):
+ K = K.lower()
+        if K not in self._reserved:
+ raise CookieError("Invalid Attribute %s" % K)
+ dict.__setitem__(self, K, V)
+ # end __setitem__
+
+ def isReservedKey(self, K):
+ return K.lower() in self._reserved
+ # end isReservedKey
+
+ def set(self, key, val, coded_val,
+ LegalChars=_LegalChars,
+ idmap=_idmap, translate=string.translate):
+ # First we verify that the key isn't a reserved word
+ # Second we make sure it only contains legal characters
+ if key.lower() in self._reserved:
+ raise CookieError("Attempt to set a reserved key: %s" % key)
+ if "" != translate(key, idmap, LegalChars):
+ raise CookieError("Illegal key value: %s" % key)
+
+ # It's a good key, so save it.
+ self.key = key
+ self.value = val
+ self.coded_value = coded_val
+ # end set
+
+    def output(self, attrs=None, header="Set-Cookie:"):
+ return "%s %s" % ( header, self.OutputString(attrs) )
+
+ __str__ = output
+
+ def __repr__(self):
+ return '<%s: %s=%s>' % (self.__class__.__name__,
+ self.key, repr(self.value) )
+
+ def js_output(self, attrs=None):
+        # Return the cookie as a snippet of JavaScript
+ return """
+ <script type="text/javascript">
+ <!-- begin hiding
+ document.cookie = \"%s\";
+ // end hiding -->
+ </script>
+ """ % ( self.OutputString(attrs), )
+ # end js_output()
+
+ def OutputString(self, attrs=None):
+ # Build up our result
+ #
+ result = []
+ RA = result.append
+
+ # First, the key=value pair
+ RA("%s=%s" % (self.key, self.coded_value))
+
+ # Now add any defined attributes
+ if attrs is None:
+ attrs = self._reserved
+ items = self.items()
+ items.sort()
+ for K,V in items:
+ if V == "": continue
+ if K not in attrs: continue
+ if K == "expires" and type(V) == type(1):
+ RA("%s=%s" % (self._reserved[K], _getdate(V)))
+ elif K == "max-age" and type(V) == type(1):
+ RA("%s=%d" % (self._reserved[K], V))
+ elif K == "secure":
+ RA(str(self._reserved[K]))
+ else:
+ RA("%s=%s" % (self._reserved[K], V))
+
+ # Return the result
+ return _semispacejoin(result)
+ # end OutputString
+# end Morsel class
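+
+# A brief illustration (not part of the original module): Morsels are
+# normally created through a cookie object's dictionary assignment,
+# and OutputString() renders a single key/value pair together with its
+# non-empty attributes.
+#
+#     >>> C = SimpleCookie()
+#     >>> C["name"] = "value"
+#     >>> C["name"]["path"] = "/"
+#     >>> C["name"].OutputString()
+#     'name=value; Path=/'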
+
+
+
+#
+# Pattern for finding cookie
+#
+# This used to be strict parsing based on the RFC2109 and RFC2068
+# specifications. I have since discovered that MSIE 3.0x doesn't
+# follow the character rules outlined in those specs. As a
+# result, the parsing rules here are less strict.
+#
+
+_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
+_CookiePattern = re.compile(
+ r"(?x)" # This is a Verbose pattern
+ r"(?P<key>" # Start of group 'key'
+ ""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
+ r")" # End of group 'key'
+ r"\s*=\s*" # Equal Sign
+ r"(?P<val>" # Start of group 'val'
+ r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
+ r"|" # or
+ ""+ _LegalCharsPatt +"*" # Any word or empty string
+ r")" # End of group 'val'
+ r"\s*;?" # Probably ending in a semi-colon
+ )
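+
+# For example (illustrative): parsing 'spam="a; b"; eggs=1' yields two
+# matches -- key 'spam' with val '"a; b"' (the doublequoted alternative
+# consumes the embedded semicolon) and key 'eggs' with val '1'.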
+
+
+# At long last, here is the cookie class.
+# Using this class is almost just like using a dictionary.
+# See this module's docstring for example usage.
+#
+class BaseCookie(dict):
+ # A container class for a set of Morsels
+ #
+
+ def value_decode(self, val):
+ """real_value, coded_value = value_decode(STRING)
+ Called prior to setting a cookie's value from the network
+        representation. The VALUE is the value read from the HTTP
+        header.
+ Override this function to modify the behavior of cookies.
+ """
+ return val, val
+    # end value_decode
+
+ def value_encode(self, val):
+ """real_value, coded_value = value_encode(VALUE)
+ Called prior to setting a cookie's value from the dictionary
+ representation. The VALUE is the value being assigned.
+ Override this function to modify the behavior of cookies.
+ """
+ strval = str(val)
+ return strval, strval
+ # end value_encode
+
+ def __init__(self, input=None):
+ if input: self.load(input)
+ # end __init__
+
+ def __set(self, key, real_value, coded_value):
+ """Private method for setting a cookie's value"""
+ M = self.get(key, Morsel())
+ M.set(key, real_value, coded_value)
+ dict.__setitem__(self, key, M)
+ # end __set
+
+ def __setitem__(self, key, value):
+ """Dictionary style assignment."""
+ rval, cval = self.value_encode(value)
+ self.__set(key, rval, cval)
+ # end __setitem__
+
+ def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
+ """Return a string suitable for HTTP."""
+ result = []
+ items = self.items()
+ items.sort()
+ for K,V in items:
+ result.append( V.output(attrs, header) )
+ return sep.join(result)
+ # end output
+
+ __str__ = output
+
+ def __repr__(self):
+ L = []
+ items = self.items()
+ items.sort()
+ for K,V in items:
+ L.append( '%s=%s' % (K,repr(V.value) ) )
+ return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
+
+ def js_output(self, attrs=None):
+ """Return a string suitable for JavaScript."""
+ result = []
+ items = self.items()
+ items.sort()
+ for K,V in items:
+ result.append( V.js_output(attrs) )
+ return _nulljoin(result)
+ # end js_output
+
+ def load(self, rawdata):
+ """Load cookies from a string (presumably HTTP_COOKIE) or
+ from a dictionary. Loading cookies from a dictionary 'd'
+ is equivalent to calling:
+ map(Cookie.__setitem__, d.keys(), d.values())
+ """
+ if type(rawdata) == type(""):
+ self.__ParseString(rawdata)
+ else:
+ self.update(rawdata)
+ return
+ # end load()
+
+ def __ParseString(self, str, patt=_CookiePattern):
+ i = 0 # Our starting point
+ n = len(str) # Length of string
+ M = None # current morsel
+
+ while 0 <= i < n:
+ # Start looking for a cookie
+ match = patt.search(str, i)
+ if not match: break # No more cookies
+
+ K,V = match.group("key"), match.group("val")
+ i = match.end(0)
+
+ # Parse the key, value in case it's metainfo
+ if K[0] == "$":
+ # We ignore attributes which pertain to the cookie
+ # mechanism as a whole. See RFC 2109.
+ # (Does anyone care?)
+ if M:
+ M[ K[1:] ] = V
+ elif K.lower() in Morsel._reserved:
+ if M:
+ M[ K ] = _unquote(V)
+ else:
+ rval, cval = self.value_decode(V)
+ self.__set(K, rval, cval)
+ M = self[K]
+ # end __ParseString
+# end BaseCookie class
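+
+# A minimal sketch of a custom flavor (illustrative; not part of the
+# original module): subclasses change the wire encoding by overriding
+# the value_decode()/value_encode() pair, exactly as the Simple/Serial/
+# Smart classes below do. Here the coded value is a hex dump:
+#
+#     class HexCookie(BaseCookie):
+#         def value_decode(self, val):
+#             return _unquote(val).decode("hex"), val
+#         def value_encode(self, val):
+#             strval = str(val)
+#             return strval, _quote(strval.encode("hex"))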
+
+class SimpleCookie(BaseCookie):
+ """SimpleCookie
+ SimpleCookie supports strings as cookie values. When setting
+ the value using the dictionary assignment notation, SimpleCookie
+ calls the builtin str() to convert the value to a string. Values
+ received from HTTP are kept as strings.
+ """
+ def value_decode(self, val):
+ return _unquote( val ), val
+ def value_encode(self, val):
+ strval = str(val)
+ return strval, _quote( strval )
+# end SimpleCookie
+
+class SerialCookie(BaseCookie):
+ """SerialCookie
+ SerialCookie supports arbitrary objects as cookie values. All
+ values are serialized (using cPickle) before being sent to the
+ client. All incoming values are assumed to be valid Pickle
+ representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
+ FORMAT, THEN AN EXCEPTION WILL BE RAISED.
+
+ Note: Large cookie values add overhead because they must be
+ retransmitted on every HTTP transaction.
+
+ Note: HTTP has a 2k limit on the size of a cookie. This class
+ does not check for this limit, so be careful!!!
+ """
+ def __init__(self, input=None):
+ warnings.warn("SerialCookie class is insecure; do not use it",
+ DeprecationWarning)
+ BaseCookie.__init__(self, input)
+ # end __init__
+ def value_decode(self, val):
+ # This could raise an exception!
+ return loads( _unquote(val) ), val
+ def value_encode(self, val):
+ return val, _quote( dumps(val) )
+# end SerialCookie
+
+class SmartCookie(BaseCookie):
+ """SmartCookie
+ SmartCookie supports arbitrary objects as cookie values. If the
+ object is a string, then it is quoted. If the object is not a
+ string, however, then SmartCookie will use cPickle to serialize
+ the object into a string representation.
+
+ Note: Large cookie values add overhead because they must be
+ retransmitted on every HTTP transaction.
+
+ Note: HTTP has a 2k limit on the size of a cookie. This class
+ does not check for this limit, so be careful!!!
+ """
+ def __init__(self, input=None):
+ warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
+ DeprecationWarning)
+ BaseCookie.__init__(self, input)
+ # end __init__
+ def value_decode(self, val):
+ strval = _unquote(val)
+ try:
+ return loads(strval), val
+ except:
+ return strval, val
+ def value_encode(self, val):
+ if type(val) == type(""):
+ return val, _quote(val)
+ else:
+ return val, _quote( dumps(val) )
+# end SmartCookie
+
+
+###########################################################
+# Backwards Compatibility: Don't break any existing code!
+
+# We provide Cookie() as an alias for SmartCookie()
+Cookie = SmartCookie
+
+#
+###########################################################
+
+def _test():
+ import doctest, Cookie
+ return doctest.testmod(Cookie)
+
+if __name__ == "__main__":
+ _test()
+
+
+#Local Variables:
+#tab-width: 4
+#end:
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/DocXMLRPCServer.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,306 @@
+"""Self documenting XML-RPC Server.
+
+This module can be used to create XML-RPC servers that
+serve pydoc-style documentation in response to HTTP
+GET requests. This documentation is dynamically generated
+based on the functions and methods registered with the
+server.
+
+This module is built upon the pydoc and SimpleXMLRPCServer
+modules.
+"""
+
+import pydoc
+import inspect
+import re
+import sys
+
+from SimpleXMLRPCServer import (SimpleXMLRPCServer,
+ SimpleXMLRPCRequestHandler,
+ CGIXMLRPCRequestHandler,
+ resolve_dotted_attribute)
+
+class ServerHTMLDoc(pydoc.HTMLDoc):
+ """Class used to generate pydoc HTML document for a server"""
+
+ def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
+ """Mark up some plain text, given a context of symbols to look for.
+ Each context dictionary maps object names to anchor names."""
+ escape = escape or self.escape
+ results = []
+ here = 0
+
+        # XXX Note that this regular expression does not allow for the
+ # hyperlinking of arbitrary strings being used as method
+ # names. Only methods with names consisting of word characters
+ # and '.'s are hyperlinked.
+ pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
+ r'RFC[- ]?(\d+)|'
+ r'PEP[- ]?(\d+)|'
+ r'(self\.)?((?:\w|\.)+))\b')
+ while 1:
+ match = pattern.search(text, here)
+ if not match: break
+ start, end = match.span()
+ results.append(escape(text[here:start]))
+
+ all, scheme, rfc, pep, selfdot, name = match.groups()
+ if scheme:
+                url = escape(all).replace('"', '&quot;')
+ results.append('<a href="%s">%s</a>' % (url, url))
+ elif rfc:
+ url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
+ results.append('<a href="%s">%s</a>' % (url, escape(all)))
+ elif pep:
+ url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
+ results.append('<a href="%s">%s</a>' % (url, escape(all)))
+ elif text[end:end+1] == '(':
+ results.append(self.namelink(name, methods, funcs, classes))
+ elif selfdot:
+ results.append('self.<strong>%s</strong>' % name)
+ else:
+ results.append(self.namelink(name, classes))
+ here = end
+ results.append(escape(text[here:]))
+ return ''.join(results)
+
+ def docroutine(self, object, name=None, mod=None,
+ funcs={}, classes={}, methods={}, cl=None):
+ """Produce HTML documentation for a function or method object."""
+
+ anchor = (cl and cl.__name__ or '') + '-' + name
+ note = ''
+
+ title = '<a name="%s"><strong>%s</strong></a>' % (anchor, name)
+
+ if inspect.ismethod(object):
+ args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
+ # exclude the argument bound to the instance, it will be
+ # confusing to the non-Python user
+            argspec = inspect.formatargspec(
+ args[1:],
+ varargs,
+ varkw,
+ defaults,
+ formatvalue=self.formatvalue
+ )
+ elif inspect.isfunction(object):
+ args, varargs, varkw, defaults = inspect.getargspec(object)
+ argspec = inspect.formatargspec(
+ args, varargs, varkw, defaults, formatvalue=self.formatvalue)
+ else:
+ argspec = '(...)'
+
+ if isinstance(object, tuple):
+ argspec = object[0] or argspec
+ docstring = object[1] or ""
+ else:
+ docstring = pydoc.getdoc(object)
+
+ decl = title + argspec + (note and self.grey(
+ '<font face="helvetica, arial">%s</font>' % note))
+
+ doc = self.markup(
+ docstring, self.preformat, funcs, classes, methods)
+ doc = doc and '<dd><tt>%s</tt></dd>' % doc
+ return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
+
+ def docserver(self, server_name, package_documentation, methods):
+ """Produce HTML documentation for an XML-RPC server."""
+
+ fdict = {}
+ for key, value in methods.items():
+ fdict[key] = '#-' + key
+ fdict[value] = fdict[key]
+
+ head = '<big><big><strong>%s</strong></big></big>' % server_name
+ result = self.heading(head, '#ffffff', '#7799ee')
+
+ doc = self.markup(package_documentation, self.preformat, fdict)
+ doc = doc and '<tt>%s</tt>' % doc
+ result = result + '<p>%s</p>\n' % doc
+
+ contents = []
+ method_items = methods.items()
+ method_items.sort()
+ for key, value in method_items:
+ contents.append(self.docroutine(value, key, funcs=fdict))
+ result = result + self.bigsection(
+ 'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))
+
+ return result
+
+class XMLRPCDocGenerator:
+ """Generates documentation for an XML-RPC server.
+
+    This class is designed as a mix-in and should not
+    be constructed directly.
+ """
+
+ def __init__(self):
+ # setup variables used for HTML documentation
+ self.server_name = 'XML-RPC Server Documentation'
+ self.server_documentation = \
+ "This server exports the following methods through the XML-RPC "\
+ "protocol."
+ self.server_title = 'XML-RPC Server Documentation'
+
+ def set_server_title(self, server_title):
+ """Set the HTML title of the generated server documentation"""
+
+ self.server_title = server_title
+
+ def set_server_name(self, server_name):
+ """Set the name of the generated HTML server documentation"""
+
+ self.server_name = server_name
+
+ def set_server_documentation(self, server_documentation):
+ """Set the documentation string for the entire server."""
+
+ self.server_documentation = server_documentation
+
+ def generate_html_documentation(self):
+ """generate_html_documentation() => html documentation for the server
+
+ Generates HTML documentation for the server using introspection for
+ installed functions and instances that do not implement the
+ _dispatch method. Alternatively, instances can choose to implement
+ the _get_method_argstring(method_name) method to provide the
+ argument string used in the documentation and the
+ _methodHelp(method_name) method to provide the help text used
+ in the documentation."""
+
+ methods = {}
+
+ for method_name in self.system_listMethods():
+ if self.funcs.has_key(method_name):
+ method = self.funcs[method_name]
+ elif self.instance is not None:
+ method_info = [None, None] # argspec, documentation
+ if hasattr(self.instance, '_get_method_argstring'):
+ method_info[0] = self.instance._get_method_argstring(method_name)
+ if hasattr(self.instance, '_methodHelp'):
+ method_info[1] = self.instance._methodHelp(method_name)
+
+ method_info = tuple(method_info)
+ if method_info != (None, None):
+ method = method_info
+ elif not hasattr(self.instance, '_dispatch'):
+ try:
+ method = resolve_dotted_attribute(
+ self.instance,
+ method_name
+ )
+ except AttributeError:
+ method = method_info
+ else:
+ method = method_info
+ else:
+                assert 0, "Could not find method in self.funcs and no "\
+                          "instance installed"
+
+ methods[method_name] = method
+
+ documenter = ServerHTMLDoc()
+ documentation = documenter.docserver(
+ self.server_name,
+ self.server_documentation,
+ methods
+ )
+
+ return documenter.page(self.server_title, documentation)
+
+class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+ """XML-RPC and documentation request handler class.
+
+ Handles all HTTP POST requests and attempts to decode them as
+ XML-RPC requests.
+
+ Handles all HTTP GET requests and interprets them as requests
+ for documentation.
+ """
+
+ def do_GET(self):
+ """Handles the HTTP GET request.
+
+ Interpret all HTTP GET requests as requests for server
+ documentation.
+ """
+ # Check that the path is legal
+ if not self.is_rpc_path_valid():
+ self.report_404()
+ return
+
+ response = self.server.generate_html_documentation()
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ self.wfile.write(response)
+
+ # shut down the connection
+ self.wfile.flush()
+ self.connection.shutdown(1)
+
+class DocXMLRPCServer( SimpleXMLRPCServer,
+ XMLRPCDocGenerator):
+ """XML-RPC and HTML documentation server.
+
+ Adds the ability to serve server documentation to the capabilities
+ of SimpleXMLRPCServer.
+ """
+
+ def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
+ logRequests=1):
+ SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests)
+ XMLRPCDocGenerator.__init__(self)
+
+class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
+ XMLRPCDocGenerator):
+ """Handler for XML-RPC data and documentation requests passed through
+ CGI"""
+
+ def handle_get(self):
+ """Handles the HTTP GET request.
+
+ Interpret all HTTP GET requests as requests for server
+ documentation.
+ """
+
+ response = self.generate_html_documentation()
+
+ print 'Content-Type: text/html'
+ print 'Content-Length: %d' % len(response)
+ sys.stdout.write(response)
+
+ def __init__(self):
+ CGIXMLRPCRequestHandler.__init__(self)
+ XMLRPCDocGenerator.__init__(self)
+
+if __name__ == '__main__':
+ def deg_to_rad(deg):
+ """deg_to_rad(90) => 1.5707963267948966
+
+ Converts an angle in degrees to an angle in radians"""
+ import math
+ return deg * math.pi / 180
+
+ server = DocXMLRPCServer(("localhost", 8000))
+
+ server.set_server_title("Math Server")
+ server.set_server_name("Math XML-RPC Server")
+ server.set_server_documentation("""This server supports various mathematical functions.
+
+You can use it from Python as follows:
+
+>>> from xmlrpclib import ServerProxy
+>>> s = ServerProxy("http://localhost:8000")
+>>> s.deg_to_rad(90.0)
+1.5707963267948966""")
+
+ server.register_function(deg_to_rad)
+ server.register_introspection_functions()
+
+ server.serve_forever()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/HTMLParser.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,369 @@
+"""A parser for HTML and XHTML."""
+
+# This file is based on sgmllib.py, but the API is slightly different.
+
+# XXX There should be a way to distinguish between PCDATA (parsed
+# character data -- the normal case), RCDATA (replaceable character
+# data -- only char and entity references and end tags are special)
+# and CDATA (character data -- only end tags are special).
+
+
+import markupbase
+import re
+
+# Regular expressions used for parsing
+
+interesting_normal = re.compile('[&<]')
+interesting_cdata = re.compile(r'<(/|\Z)')
+incomplete = re.compile('&[a-zA-Z#]')
+
+entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
+charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
+
+starttagopen = re.compile('<[a-zA-Z]')
+piclose = re.compile('>')
+commentclose = re.compile(r'--\s*>')
+tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
+attrfind = re.compile(
+ r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
+ r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?')
+
+locatestarttagend = re.compile(r"""
+ <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
+ (?:\s+ # whitespace before attribute name
+ (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
+ (?:\s*=\s* # value indicator
+ (?:'[^']*' # LITA-enclosed value
+ |\"[^\"]*\" # LIT-enclosed value
+ |[^'\">\s]+ # bare value
+ )
+ )?
+ )
+ )*
+ \s* # trailing whitespace
+""", re.VERBOSE)
+endendtag = re.compile('>')
+endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
+
+
+class HTMLParseError(Exception):
+ """Exception raised for all parse errors."""
+
+ def __init__(self, msg, position=(None, None)):
+ assert msg
+ self.msg = msg
+ self.lineno = position[0]
+ self.offset = position[1]
+
+ def __str__(self):
+ result = self.msg
+ if self.lineno is not None:
+ result = result + ", at line %d" % self.lineno
+ if self.offset is not None:
+ result = result + ", column %d" % (self.offset + 1)
+ return result
+
+
+class HTMLParser(markupbase.ParserBase):
+ """Find tags and other markup and call handler functions.
+
+ Usage:
+ p = HTMLParser()
+ p.feed(data)
+ ...
+ p.close()
+
+ Start tags are handled by calling self.handle_starttag() or
+ self.handle_startendtag(); end tags by self.handle_endtag(). The
+ data between tags is passed from the parser to the derived class
+ by calling self.handle_data() with the data as argument (the data
+ may be split up in arbitrary chunks). Entity references are
+ passed by calling self.handle_entityref() with the entity
+ reference as the argument. Numeric character references are
+ passed to self.handle_charref() with the string containing the
+ reference as the argument.
+ """
+
+ CDATA_CONTENT_ELEMENTS = ("script", "style")
+
+
+ def __init__(self):
+ """Initialize and reset this instance."""
+ self.reset()
+
+ def reset(self):
+ """Reset this instance. Loses all unprocessed data."""
+ self.rawdata = ''
+ self.lasttag = '???'
+ self.interesting = interesting_normal
+ markupbase.ParserBase.reset(self)
+
+ def feed(self, data):
+ """Feed data to the parser.
+
+ Call this as often as you want, with as little or as much text
+ as you want (may include '\n').
+ """
+ self.rawdata = self.rawdata + data
+ self.goahead(0)
+
+ def close(self):
+ """Handle any buffered data."""
+ self.goahead(1)
+
+ def error(self, message):
+ raise HTMLParseError(message, self.getpos())
+
+ __starttag_text = None
+
+ def get_starttag_text(self):
+ """Return full source of start tag: '<...>'."""
+ return self.__starttag_text
+
+ def set_cdata_mode(self):
+ self.interesting = interesting_cdata
+
+ def clear_cdata_mode(self):
+ self.interesting = interesting_normal
+
+ # Internal -- handle data as far as reasonable. May leave state
+ # and data to be processed by a subsequent call. If 'end' is
+ # true, force handling all data as if followed by EOF marker.
+ def goahead(self, end):
+ rawdata = self.rawdata
+ i = 0
+ n = len(rawdata)
+ while i < n:
+ match = self.interesting.search(rawdata, i) # < or &
+ if match:
+ j = match.start()
+ else:
+ j = n
+ if i < j: self.handle_data(rawdata[i:j])
+ i = self.updatepos(i, j)
+ if i == n: break
+ startswith = rawdata.startswith
+ if startswith('<', i):
+ if starttagopen.match(rawdata, i): # < + letter
+ k = self.parse_starttag(i)
+ elif startswith("</", i):
+ k = self.parse_endtag(i)
+ elif startswith("<!--", i):
+ k = self.parse_comment(i)
+ elif startswith("<?", i):
+ k = self.parse_pi(i)
+ elif startswith("<!", i):
+ k = self.parse_declaration(i)
+ elif (i + 1) < n:
+ self.handle_data("<")
+ k = i + 1
+ else:
+ break
+ if k < 0:
+ if end:
+ self.error("EOF in middle of construct")
+ break
+ i = self.updatepos(i, k)
+ elif startswith("&#", i):
+ match = charref.match(rawdata, i)
+ if match:
+ name = match.group()[2:-1]
+ self.handle_charref(name)
+ k = match.end()
+ if not startswith(';', k-1):
+ k = k - 1
+ i = self.updatepos(i, k)
+ continue
+ else:
+ break
+ elif startswith('&', i):
+ match = entityref.match(rawdata, i)
+ if match:
+ name = match.group(1)
+ self.handle_entityref(name)
+ k = match.end()
+ if not startswith(';', k-1):
+ k = k - 1
+ i = self.updatepos(i, k)
+ continue
+ match = incomplete.match(rawdata, i)
+ if match:
+ # match.group() will contain at least 2 chars
+ if end and match.group() == rawdata[i:]:
+ self.error("EOF in middle of entity or char ref")
+ # incomplete
+ break
+ elif (i + 1) < n:
+ # not the end of the buffer, and can't be confused
+ # with some other construct
+ self.handle_data("&")
+ i = self.updatepos(i, i + 1)
+ else:
+ break
+ else:
+ assert 0, "interesting.search() lied"
+ # end while
+ if end and i < n:
+ self.handle_data(rawdata[i:n])
+ i = self.updatepos(i, n)
+ self.rawdata = rawdata[i:]
+
+ # Internal -- parse processing instr, return end or -1 if not terminated
+ def parse_pi(self, i):
+ rawdata = self.rawdata
+ assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
+ match = piclose.search(rawdata, i+2) # >
+ if not match:
+ return -1
+ j = match.start()
+ self.handle_pi(rawdata[i+2: j])
+ j = match.end()
+ return j
+
+ # Internal -- handle starttag, return end or -1 if not terminated
+ def parse_starttag(self, i):
+ self.__starttag_text = None
+ endpos = self.check_for_whole_start_tag(i)
+ if endpos < 0:
+ return endpos
+ rawdata = self.rawdata
+ self.__starttag_text = rawdata[i:endpos]
+
+ # Now parse the data between i+1 and j into a tag and attrs
+ attrs = []
+ match = tagfind.match(rawdata, i+1)
+ assert match, 'unexpected call to parse_starttag()'
+ k = match.end()
+ self.lasttag = tag = rawdata[i+1:k].lower()
+
+ while k < endpos:
+ m = attrfind.match(rawdata, k)
+ if not m:
+ break
+ attrname, rest, attrvalue = m.group(1, 2, 3)
+ if not rest:
+ attrvalue = None
+ elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
+ attrvalue[:1] == '"' == attrvalue[-1:]:
+ attrvalue = attrvalue[1:-1]
+ attrvalue = self.unescape(attrvalue)
+ attrs.append((attrname.lower(), attrvalue))
+ k = m.end()
+
+ end = rawdata[k:endpos].strip()
+ if end not in (">", "/>"):
+ lineno, offset = self.getpos()
+ if "\n" in self.__starttag_text:
+ lineno = lineno + self.__starttag_text.count("\n")
+ offset = len(self.__starttag_text) \
+ - self.__starttag_text.rfind("\n")
+ else:
+ offset = offset + len(self.__starttag_text)
+ self.error("junk characters in start tag: %r"
+ % (rawdata[k:endpos][:20],))
+ if end.endswith('/>'):
+ # XHTML-style empty tag: <span attr="value" />
+ self.handle_startendtag(tag, attrs)
+ else:
+ self.handle_starttag(tag, attrs)
+ if tag in self.CDATA_CONTENT_ELEMENTS:
+ self.set_cdata_mode()
+ return endpos
+
+ # Internal -- check to see if we have a complete starttag; return end
+ # or -1 if incomplete.
+ def check_for_whole_start_tag(self, i):
+ rawdata = self.rawdata
+ m = locatestarttagend.match(rawdata, i)
+ if m:
+ j = m.end()
+ next = rawdata[j:j+1]
+ if next == ">":
+ return j + 1
+ if next == "/":
+ if rawdata.startswith("/>", j):
+ return j + 2
+ if rawdata.startswith("/", j):
+ # buffer boundary
+ return -1
+ # else bogus input
+ self.updatepos(i, j + 1)
+ self.error("malformed empty start tag")
+ if next == "":
+ # end of input
+ return -1
+ if next in ("abcdefghijklmnopqrstuvwxyz=/"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
+ # end of input in or before attribute value, or we have the
+ # '/' from a '/>' ending
+ return -1
+ self.updatepos(i, j)
+ self.error("malformed start tag")
+ raise AssertionError("we should not get here!")
+
+ # Internal -- parse endtag, return end or -1 if incomplete
+ def parse_endtag(self, i):
+ rawdata = self.rawdata
+ assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
+ match = endendtag.search(rawdata, i+1) # >
+ if not match:
+ return -1
+ j = match.end()
+ match = endtagfind.match(rawdata, i) # </ + tag + >
+ if not match:
+ self.error("bad end tag: %r" % (rawdata[i:j],))
+ tag = match.group(1)
+ self.handle_endtag(tag.lower())
+ self.clear_cdata_mode()
+ return j
+
+ # Overridable -- finish processing of start+end tag: <tag.../>
+ def handle_startendtag(self, tag, attrs):
+ self.handle_starttag(tag, attrs)
+ self.handle_endtag(tag)
+
+ # Overridable -- handle start tag
+ def handle_starttag(self, tag, attrs):
+ pass
+
+ # Overridable -- handle end tag
+ def handle_endtag(self, tag):
+ pass
+
+ # Overridable -- handle character reference
+ def handle_charref(self, name):
+ pass
+
+ # Overridable -- handle entity reference
+ def handle_entityref(self, name):
+ pass
+
+ # Overridable -- handle data
+ def handle_data(self, data):
+ pass
+
+ # Overridable -- handle comment
+ def handle_comment(self, data):
+ pass
+
+ # Overridable -- handle declaration
+ def handle_decl(self, decl):
+ pass
+
+ # Overridable -- handle processing instruction
+ def handle_pi(self, data):
+ pass
+
+ def unknown_decl(self, data):
+ self.error("unknown declaration: %r" % (data,))
+
+ # Internal -- helper to remove special character quoting
+ def unescape(self, s):
+ if '&' not in s:
+ return s
+        s = s.replace("&lt;", "<")
+        s = s.replace("&gt;", ">")
+        s = s.replace("&apos;", "'")
+        s = s.replace("&quot;", '"')
+        s = s.replace("&amp;", "&") # Must be last
+ return s
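+
+# A minimal usage sketch (illustrative; not part of the module):
+# subclass HTMLParser and override only the callbacks of interest.
+#
+#     class LinkDumper(HTMLParser):
+#         def handle_starttag(self, tag, attrs):
+#             if tag == "a":
+#                 for name, value in attrs:
+#                     if name == "href":
+#                         print value
+#
+#     p = LinkDumper()
+#     p.feed('<a href="http://example.com/">example</a>')
+#     p.close()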
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/MimeWriter.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,181 @@
+"""Generic MIME writer.
+
+This module defines the class MimeWriter. The MimeWriter class implements
+a basic formatter for creating MIME multi-part files. It doesn't seek around
+the output file nor does it use large amounts of buffer space. You must write
+the parts out in the order that they should occur in the final file.
+MimeWriter does buffer the headers you add, allowing you to rearrange their
+order.
+
+"""
+
+
+import mimetools
+
+__all__ = ["MimeWriter"]
+
+class MimeWriter:
+
+ """Generic MIME writer.
+
+ Methods:
+
+ __init__()
+ addheader()
+ flushheaders()
+ startbody()
+ startmultipartbody()
+ nextpart()
+ lastpart()
+
+ A MIME writer is much more primitive than a MIME parser. It
+ doesn't seek around on the output file, and it doesn't use large
+ amounts of buffer space, so you have to write the parts in the
+ order they should occur on the output file. It does buffer the
+ headers you add, allowing you to rearrange their order.
+
+ General usage is:
+
+ f = <open the output file>
+ w = MimeWriter(f)
+ ...call w.addheader(key, value) 0 or more times...
+
+ followed by either:
+
+ f = w.startbody(content_type)
+ ...call f.write(data) for body data...
+
+ or:
+
+ w.startmultipartbody(subtype)
+ for each part:
+ subwriter = w.nextpart()
+ ...use the subwriter's methods to create the subpart...
+ w.lastpart()
+
+ The subwriter is another MimeWriter instance, and should be
+ treated in the same way as the toplevel MimeWriter. This way,
+ writing recursive body parts is easy.
+
+ Warning: don't forget to call lastpart()!
+
+ XXX There should be more state so calls made in the wrong order
+ are detected.
+
+ Some special cases:
+
+ - startbody() just returns the file passed to the constructor;
+ but don't use this knowledge, as it may be changed.
+
+ - startmultipartbody() actually returns a file as well;
+ this can be used to write the initial 'if you can read this your
+ mailer is not MIME-aware' message.
+
+ - If you call flushheaders(), the headers accumulated so far are
+ written out (and forgotten); this is useful if you don't need a
+ body part at all, e.g. for a subpart of type message/rfc822
+ that's (mis)used to store some header-like information.
+
+ - Passing a keyword argument 'prefix=<flag>' to addheader(),
+ start*body() affects where the header is inserted; 0 means
+ append at the end, 1 means insert at the start; default is
+ append for addheader(), but insert for start*body(), which use
+ it to determine where the Content-Type header goes.
+
+ """
+
+ def __init__(self, fp):
+ self._fp = fp
+ self._headers = []
+
+ def addheader(self, key, value, prefix=0):
+ """Add a header line to the MIME message.
+
+        The key is the name of the header; the value gives the header's
+        contents. The optional argument prefix determines
+ where the header is inserted; 0 means append at the end, 1 means
+ insert at the start. The default is to append.
+
+ """
+ lines = value.split("\n")
+ while lines and not lines[-1]: del lines[-1]
+ while lines and not lines[0]: del lines[0]
+ for i in range(1, len(lines)):
+ lines[i] = " " + lines[i].strip()
+ value = "\n".join(lines) + "\n"
+ line = key + ": " + value
+ if prefix:
+ self._headers.insert(0, line)
+ else:
+ self._headers.append(line)
+
+ def flushheaders(self):
+ """Writes out and forgets all headers accumulated so far.
+
+ This is useful if you don't need a body part at all; for example,
+ for a subpart of type message/rfc822 that's (mis)used to store some
+ header-like information.
+
+ """
+ self._fp.writelines(self._headers)
+ self._headers = []
+
+ def startbody(self, ctype, plist=[], prefix=1):
+ """Returns a file-like object for writing the body of the message.
+
+ The content-type is set to the provided ctype, and the optional
+ parameter, plist, provides additional parameters for the
+ content-type declaration. The optional argument prefix determines
+ where the header is inserted; 0 means append at the end, 1 means
+ insert at the start. The default is to insert at the start.
+
+ """
+ for name, value in plist:
+ ctype = ctype + ';\n %s=\"%s\"' % (name, value)
+ self.addheader("Content-Type", ctype, prefix=prefix)
+ self.flushheaders()
+ self._fp.write("\n")
+ return self._fp
+
+ def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
+ """Returns a file-like object for writing the body of the message.
+
+ Additionally, this method initializes the multi-part code, where the
+ subtype parameter provides the multipart subtype, the boundary
+ parameter may provide a user-defined boundary specification, and the
+ plist parameter provides optional parameters for the subtype. The
+ optional argument, prefix, determines where the header is inserted;
+ 0 means append at the end, 1 means insert at the start. The default
+ is to insert at the start. Subparts should be created using the
+ nextpart() method.
+
+ """
+ self._boundary = boundary or mimetools.choose_boundary()
+ return self.startbody("multipart/" + subtype,
+ [("boundary", self._boundary)] + plist,
+ prefix=prefix)
+
+ def nextpart(self):
+ """Returns a new instance of MimeWriter which represents an
+ individual part in a multipart message.
+
+ This may be used to write the part as well as used for creating
+ recursively complex multipart messages. The message must first be
+ initialized with the startmultipartbody() method before using the
+ nextpart() method.
+
+ """
+ self._fp.write("\n--" + self._boundary + "\n")
+ return self.__class__(self._fp)
+
+ def lastpart(self):
+ """This is used to designate the last part of a multipart message.
+
+ It should always be used when writing multipart messages.
+
+ """
+ self._fp.write("\n--" + self._boundary + "--\n")
+
+
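+# A short usage sketch (illustrative; assumes 'fp' is an open, writable
+# file object):
+#
+#     w = MimeWriter(fp)
+#     w.addheader("Subject", "demo")
+#     w.startmultipartbody("mixed")
+#     part = w.nextpart()
+#     body = part.startbody("text/plain")
+#     body.write("hello\n")
+#     w.lastpart()
+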
+if __name__ == '__main__':
+ import test.test_MimeWriter
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/Queue.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,215 @@
+"""A multi-producer, multi-consumer queue."""
+
+from time import time as _time
+from collections import deque
+
+__all__ = ['Empty', 'Full', 'Queue']
+
+class Empty(Exception):
+ "Exception raised by Queue.get(block=0)/get_nowait()."
+ pass
+
+class Full(Exception):
+ "Exception raised by Queue.put(block=0)/put_nowait()."
+ pass
+
+class Queue:
+ """Create a queue object with a given maximum size.
+
+ If maxsize is <= 0, the queue size is infinite.
+ """
+ def __init__(self, maxsize=0):
+ try:
+ import threading
+ except ImportError:
+ import dummy_threading as threading
+ self._init(maxsize)
+ # mutex must be held whenever the queue is mutating. All methods
+ # that acquire mutex must release it before returning. mutex
+ # is shared between the three conditions, so acquiring and
+ # releasing the conditions also acquires and releases mutex.
+ self.mutex = threading.Lock()
+ # Notify not_empty whenever an item is added to the queue; a
+ # thread waiting to get is notified then.
+ self.not_empty = threading.Condition(self.mutex)
+ # Notify not_full whenever an item is removed from the queue;
+ # a thread waiting to put is notified then.
+ self.not_full = threading.Condition(self.mutex)
+ # Notify all_tasks_done whenever the number of unfinished tasks
+ # drops to zero; thread waiting to join() is notified to resume
+ self.all_tasks_done = threading.Condition(self.mutex)
+ self.unfinished_tasks = 0
+
+ def task_done(self):
+ """Indicate that a formerly enqueued task is complete.
+
+ Used by Queue consumer threads. For each get() used to fetch a task,
+ a subsequent call to task_done() tells the queue that the processing
+ on the task is complete.
+
+ If a join() is currently blocking, it will resume when all items
+ have been processed (meaning that a task_done() call was received
+ for every item that had been put() into the queue).
+
+ Raises a ValueError if called more times than there were items
+ placed in the queue.
+ """
+ self.all_tasks_done.acquire()
+ try:
+ unfinished = self.unfinished_tasks - 1
+ if unfinished <= 0:
+ if unfinished < 0:
+ raise ValueError('task_done() called too many times')
+ self.all_tasks_done.notifyAll()
+ self.unfinished_tasks = unfinished
+ finally:
+ self.all_tasks_done.release()
+
+ def join(self):
+ """Blocks until all items in the Queue have been gotten and processed.
+
+ The count of unfinished tasks goes up whenever an item is added to the
+ queue. The count goes down whenever a consumer thread calls task_done()
+ to indicate the item was retrieved and all work on it is complete.
+
+ When the count of unfinished tasks drops to zero, join() unblocks.
+ """
+ self.all_tasks_done.acquire()
+ try:
+ while self.unfinished_tasks:
+ self.all_tasks_done.wait()
+ finally:
+ self.all_tasks_done.release()
+
+ def qsize(self):
+ """Return the approximate size of the queue (not reliable!)."""
+ self.mutex.acquire()
+ n = self._qsize()
+ self.mutex.release()
+ return n
+
+ def empty(self):
+ """Return True if the queue is empty, False otherwise (not reliable!)."""
+ self.mutex.acquire()
+ n = self._empty()
+ self.mutex.release()
+ return n
+
+ def full(self):
+ """Return True if the queue is full, False otherwise (not reliable!)."""
+ self.mutex.acquire()
+ n = self._full()
+ self.mutex.release()
+ return n
+
+ def put(self, item, block=True, timeout=None):
+ """Put an item into the queue.
+
+        If the optional arg 'block' is true and 'timeout' is None (the default),
+ block if necessary until a free slot is available. If 'timeout' is
+ a positive number, it blocks at most 'timeout' seconds and raises
+ the Full exception if no free slot was available within that time.
+ Otherwise ('block' is false), put an item on the queue if a free slot
+ is immediately available, else raise the Full exception ('timeout'
+ is ignored in that case).
+ """
+ self.not_full.acquire()
+ try:
+ if not block:
+ if self._full():
+ raise Full
+ elif timeout is None:
+ while self._full():
+ self.not_full.wait()
+ else:
+ if timeout < 0:
+ raise ValueError("'timeout' must be a positive number")
+ endtime = _time() + timeout
+ while self._full():
+ remaining = endtime - _time()
+ if remaining <= 0.0:
+ raise Full
+ self.not_full.wait(remaining)
+ self._put(item)
+ self.unfinished_tasks += 1
+ self.not_empty.notify()
+ finally:
+ self.not_full.release()
+
+ def put_nowait(self, item):
+ """Put an item into the queue without blocking.
+
+ Only enqueue the item if a free slot is immediately available.
+ Otherwise raise the Full exception.
+ """
+ return self.put(item, False)
+
+ def get(self, block=True, timeout=None):
+ """Remove and return an item from the queue.
+
+        If the optional arg 'block' is true and 'timeout' is None (the default),
+ block if necessary until an item is available. If 'timeout' is
+ a positive number, it blocks at most 'timeout' seconds and raises
+ the Empty exception if no item was available within that time.
+ Otherwise ('block' is false), return an item if one is immediately
+ available, else raise the Empty exception ('timeout' is ignored
+ in that case).
+ """
+ self.not_empty.acquire()
+ try:
+ if not block:
+ if self._empty():
+ raise Empty
+ elif timeout is None:
+ while self._empty():
+ self.not_empty.wait()
+ else:
+ if timeout < 0:
+ raise ValueError("'timeout' must be a positive number")
+ endtime = _time() + timeout
+ while self._empty():
+ remaining = endtime - _time()
+ if remaining <= 0.0:
+ raise Empty
+ self.not_empty.wait(remaining)
+ item = self._get()
+ self.not_full.notify()
+ return item
+ finally:
+ self.not_empty.release()
+
+ def get_nowait(self):
+ """Remove and return an item from the queue without blocking.
+
+ Only get an item if one is immediately available. Otherwise
+ raise the Empty exception.
+ """
+ return self.get(False)
+
+ # Override these methods to implement other queue organizations
+ # (e.g. stack or priority queue).
+ # These will only be called with appropriate locks held
+
+ # Initialize the queue representation
+ def _init(self, maxsize):
+ self.maxsize = maxsize
+ self.queue = deque()
+
+ def _qsize(self):
+ return len(self.queue)
+
+ # Check whether the queue is empty
+ def _empty(self):
+ return not self.queue
+
+ # Check whether the queue is full
+ def _full(self):
+ return self.maxsize > 0 and len(self.queue) == self.maxsize
+
+ # Put a new item in the queue
+ def _put(self, item):
+ self.queue.append(item)
+
+ # Get an item from the queue
+ def _get(self):
+ return self.queue.popleft()
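+
+# A minimal producer/consumer sketch (illustrative; not part of the
+# module):
+#
+#     import threading, Queue
+#     q = Queue.Queue(maxsize=10)
+#
+#     def worker():
+#         while True:
+#             item = q.get()          # blocks until an item is ready
+#             # ... process item ...
+#             q.task_done()           # one task_done() per get()
+#
+#     # (make the thread a daemon in real code so the process can exit)
+#     threading.Thread(target=worker).start()
+#     for item in range(100):
+#         q.put(item)                 # blocks while the queue is full
+#     q.join()                        # returns once all items are done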
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/SimpleHTTPServer.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,218 @@
+"""Simple HTTP Server.
+
+This module builds on BaseHTTPServer by implementing the standard GET
+and HEAD requests in a fairly straightforward manner.
+
+"""
+
+
+__version__ = "0.6"
+
+__all__ = ["SimpleHTTPRequestHandler"]
+
+import os
+import posixpath
+import BaseHTTPServer
+import urllib
+import urlparse
+import cgi
+import shutil
+import mimetypes
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+
+class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+ """Simple HTTP request handler with GET and HEAD commands.
+
+ This serves files from the current directory and any of its
+ subdirectories. The MIME type for files is determined by
+ calling the .guess_type() method.
+
+ The GET and HEAD requests are identical except that the HEAD
+ request omits the actual contents of the file.
+
+ """
+
+ server_version = "SimpleHTTP/" + __version__
+
+ def do_GET(self):
+ """Serve a GET request."""
+ f = self.send_head()
+ if f:
+ self.copyfile(f, self.wfile)
+ f.close()
+
+ def do_HEAD(self):
+ """Serve a HEAD request."""
+ f = self.send_head()
+ if f:
+ f.close()
+
+ def send_head(self):
+ """Common code for GET and HEAD commands.
+
+ This sends the response code and MIME headers.
+
+ Return value is either a file object (which has to be copied
+ to the outputfile by the caller unless the command was HEAD,
+ and must be closed by the caller under all circumstances), or
+ None, in which case the caller has nothing further to do.
+
+ """
+ path = self.translate_path(self.path)
+ f = None
+ if os.path.isdir(path):
+ if not self.path.endswith('/'):
+ # redirect browser - doing basically what apache does
+ self.send_response(301)
+ self.send_header("Location", self.path + "/")
+ self.end_headers()
+ return None
+ for index in "index.html", "index.htm":
+ index = os.path.join(path, index)
+ if os.path.exists(index):
+ path = index
+ break
+ else:
+ return self.list_directory(path)
+ ctype = self.guess_type(path)
+ if ctype.startswith('text/'):
+ mode = 'r'
+ else:
+ mode = 'rb'
+ try:
+ f = open(path, mode)
+ except IOError:
+ self.send_error(404, "File not found")
+ return None
+ self.send_response(200)
+ self.send_header("Content-type", ctype)
+ fs = os.fstat(f.fileno())
+ self.send_header("Content-Length", str(fs[6]))
+ self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+ self.end_headers()
+ return f
+
+ def list_directory(self, path):
+ """Helper to produce a directory listing (absent index.html).
+
+ Return value is either a file object, or None (indicating an
+ error). In either case, the headers are sent, making the
+ interface the same as for send_head().
+
+ """
+ try:
+ list = os.listdir(path)
+ except os.error:
+ self.send_error(404, "No permission to list directory")
+ return None
+ list.sort(key=lambda a: a.lower())
+ f = StringIO()
+ displaypath = cgi.escape(urllib.unquote(self.path))
+ f.write("<title>Directory listing for %s</title>\n" % displaypath)
+ f.write("<h2>Directory listing for %s</h2>\n" % displaypath)
+ f.write("<hr>\n<ul>\n")
+ for name in list:
+ fullname = os.path.join(path, name)
+ displayname = linkname = name
+ # Append / for directories or @ for symbolic links
+ if os.path.isdir(fullname):
+ displayname = name + "/"
+ linkname = name + "/"
+ if os.path.islink(fullname):
+ displayname = name + "@"
+ # Note: a link to a directory displays with @ and links with /
+ f.write('<li><a href="%s">%s</a>\n'
+ % (urllib.quote(linkname), cgi.escape(displayname)))
+ f.write("</ul>\n<hr>\n")
+ length = f.tell()
+ f.seek(0)
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.send_header("Content-Length", str(length))
+ self.end_headers()
+ return f
+
+ def translate_path(self, path):
+ """Translate a /-separated PATH to the local filename syntax.
+
+ Components that mean special things to the local file system
+ (e.g. drive or directory names) are ignored. (XXX They should
+ probably be diagnosed.)
+
+ """
+ # abandon query parameters
+ path = urlparse.urlparse(path)[2]
+ path = posixpath.normpath(urllib.unquote(path))
+ words = path.split('/')
+ words = filter(None, words)
+ path = os.getcwd()
+ for word in words:
+ drive, word = os.path.splitdrive(word)
+ head, word = os.path.split(word)
+ if word in (os.curdir, os.pardir): continue
+ path = os.path.join(path, word)
+ return path
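+
+    # For example (illustrative): with the process running in /srv,
+    # translate_path("/a//../b%20c?x=1") yields "/srv/b c" -- the query
+    # string is dropped, %-escapes are decoded, and the path is
+    # normalized relative to the current directory.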
+
+ def copyfile(self, source, outputfile):
+ """Copy all data between two file objects.
+
+ The SOURCE argument is a file object open for reading
+ (or anything with a read() method) and the DESTINATION
+ argument is a file object open for writing (or
+ anything with a write() method).
+
+ The only reason for overriding this would be to change
+ the block size or perhaps to replace newlines by CRLF
+        -- note however that the default server uses this
+ to copy binary data as well.
+
+ """
+ shutil.copyfileobj(source, outputfile)
+
+ def guess_type(self, path):
+ """Guess the type of a file.
+
+ Argument is a PATH (a filename).
+
+ Return value is a string of the form type/subtype,
+ usable for a MIME Content-type header.
+
+ The default implementation looks the file's extension
+ up in the table self.extensions_map, using application/octet-stream
+ as a default; however it would be permissible (if
+ slow) to look inside the data to make a better guess.
+
+ """
+
+ base, ext = posixpath.splitext(path)
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ ext = ext.lower()
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ else:
+ return self.extensions_map['']
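+
+    # For example (illustrative): guess_type("notes.py") returns
+    # "text/plain" via the table below, guess_type("photo.JPG") returns
+    # "image/jpeg" after lowercasing the extension, and an unknown
+    # extension falls back to "application/octet-stream".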
+
+ if not mimetypes.inited:
+ mimetypes.init() # try to read system mime.types
+ extensions_map = mimetypes.types_map.copy()
+ extensions_map.update({
+ '': 'application/octet-stream', # Default
+ '.py': 'text/plain',
+ '.c': 'text/plain',
+ '.h': 'text/plain',
+ })
+
+
+def test(HandlerClass = SimpleHTTPRequestHandler,
+ ServerClass = BaseHTTPServer.HTTPServer):
+ BaseHTTPServer.test(HandlerClass, ServerClass)
+
+
+if __name__ == '__main__':
+ test()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/SimpleXMLRPCServer.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,595 @@
+"""Simple XML-RPC Server.
+
+This module can be used to create simple XML-RPC servers
+by creating a server and either installing functions, a
+class instance, or by extending the SimpleXMLRPCServer
+class.
+
+It can also be used to handle XML-RPC requests in a CGI
+environment using CGIXMLRPCRequestHandler.
+
+A list of possible usage patterns follows:
+
+1. Install functions:
+
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_function(pow)
+server.register_function(lambda x,y: x+y, 'add')
+server.serve_forever()
+
+2. Install an instance:
+
+class MyFuncs:
+ def __init__(self):
+ # make all of the string functions available through
+ # string.func_name
+ import string
+ self.string = string
+ def _listMethods(self):
+ # implement this method so that system.listMethods
+ # knows to advertise the strings methods
+ return list_public_methods(self) + \
+ ['string.' + method for method in list_public_methods(self.string)]
+ def pow(self, x, y): return pow(x, y)
+ def add(self, x, y) : return x + y
+
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_introspection_functions()
+server.register_instance(MyFuncs())
+server.serve_forever()
+
+3. Install an instance with custom dispatch method:
+
+class Math:
+ def _listMethods(self):
+ # this method must be present for system.listMethods
+ # to work
+ return ['add', 'pow']
+ def _methodHelp(self, method):
+ # this method must be present for system.methodHelp
+ # to work
+ if method == 'add':
+ return "add(2,3) => 5"
+ elif method == 'pow':
+ return "pow(x, y[, z]) => number"
+ else:
+ # By convention, return empty
+ # string if no help is available
+ return ""
+ def _dispatch(self, method, params):
+ if method == 'pow':
+ return pow(*params)
+ elif method == 'add':
+ return params[0] + params[1]
+ else:
+            raise Exception('bad method')
+
+server = SimpleXMLRPCServer(("localhost", 8000))
+server.register_introspection_functions()
+server.register_instance(Math())
+server.serve_forever()
+
+4. Subclass SimpleXMLRPCServer:
+
+class MathServer(SimpleXMLRPCServer):
+ def _dispatch(self, method, params):
+ try:
+ # We are forcing the 'export_' prefix on methods that are
+ # callable through XML-RPC to prevent potential security
+ # problems
+ func = getattr(self, 'export_' + method)
+ except AttributeError:
+ raise Exception('method "%s" is not supported' % method)
+ else:
+ return func(*params)
+
+ def export_add(self, x, y):
+ return x + y
+
+server = MathServer(("localhost", 8000))
+server.serve_forever()
+
+5. CGI script:
+
+server = CGIXMLRPCRequestHandler()
+server.register_function(pow)
+server.handle_request()
+"""
+
+# Written by Brian Quinlan (brian@sweetapp.com).
+# Based on code written by Fredrik Lundh.
+
+import xmlrpclib
+from xmlrpclib import Fault
+import SocketServer
+import BaseHTTPServer
+import sys
+import os
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+
+def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
+ """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
+
+ Resolves a dotted attribute name to an object. Raises
+ an AttributeError if any attribute in the chain starts with a '_'.
+
+ If the optional allow_dotted_names argument is false, dots are not
+    supported and this function operates like getattr(obj, attr).
+ """
+
+ if allow_dotted_names:
+ attrs = attr.split('.')
+ else:
+ attrs = [attr]
+
+ for i in attrs:
+ if i.startswith('_'):
+ raise AttributeError(
+ 'attempt to access private attribute "%s"' % i
+ )
+ else:
+ obj = getattr(obj,i)
+ return obj
+
+def list_public_methods(obj):
+ """Returns a list of attribute strings, found in the specified
+ object, which represent callable attributes"""
+
+ return [member for member in dir(obj)
+ if not member.startswith('_') and
+ callable(getattr(obj, member))]
+
+def remove_duplicates(lst):
+ """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]
+
+ Returns a copy of a list without duplicates. Every list
+ item must be hashable and the order of the items in the
+ resulting list is not defined.
+ """
+ u = {}
+ for x in lst:
+ u[x] = 1
+
+ return u.keys()
+
+class SimpleXMLRPCDispatcher:
+ """Mix-in class that dispatches XML-RPC requests.
+
+ This class is used to register XML-RPC method handlers
+ and then to dispatch them. There should never be any
+ reason to instantiate this class directly.
+ """
+
+ def __init__(self, allow_none, encoding):
+ self.funcs = {}
+ self.instance = None
+ self.allow_none = allow_none
+ self.encoding = encoding
+
+ def register_instance(self, instance, allow_dotted_names=False):
+ """Registers an instance to respond to XML-RPC requests.
+
+ Only one instance can be installed at a time.
+
+ If the registered instance has a _dispatch method then that
+ method will be called with the name of the XML-RPC method and
+ its parameters as a tuple
+ e.g. instance._dispatch('add',(2,3))
+
+ If the registered instance does not have a _dispatch method
+ then the instance will be searched to find a matching method
+ and, if found, will be called. Methods beginning with an '_'
+ are considered private and will not be called by
+ SimpleXMLRPCServer.
+
+        If a registered function matches an XML-RPC request, then it
+ will be called instead of the registered instance.
+
+ If the optional allow_dotted_names argument is true and the
+ instance does not have a _dispatch method, method names
+ containing dots are supported and resolved, as long as none of
+ the name segments start with an '_'.
+
+ *** SECURITY WARNING: ***
+
+        Enabling the allow_dotted_names option allows intruders
+ to access your module's global variables and may allow
+ intruders to execute arbitrary code on your machine. Only
+ use this option on a secure, closed network.
+
+ """
+
+ self.instance = instance
+ self.allow_dotted_names = allow_dotted_names
+
+ def register_function(self, function, name = None):
+ """Registers a function to respond to XML-RPC requests.
+
+ The optional name argument can be used to set a Unicode name
+ for the function.
+ """
+
+ if name is None:
+ name = function.__name__
+ self.funcs[name] = function
+
+ def register_introspection_functions(self):
+ """Registers the XML-RPC introspection methods in the system
+ namespace.
+
+ see http://xmlrpc.usefulinc.com/doc/reserved.html
+ """
+
+ self.funcs.update({'system.listMethods' : self.system_listMethods,
+ 'system.methodSignature' : self.system_methodSignature,
+ 'system.methodHelp' : self.system_methodHelp})
+
+ def register_multicall_functions(self):
+ """Registers the XML-RPC multicall method in the system
+ namespace.
+
+ see http://www.xmlrpc.com/discuss/msgReader$1208"""
+
+ self.funcs.update({'system.multicall' : self.system_multicall})
+
+ def _marshaled_dispatch(self, data, dispatch_method = None):
+ """Dispatches an XML-RPC method from marshalled (XML) data.
+
+ XML-RPC methods are dispatched from the marshalled (XML) data
+ using the _dispatch method and the result is returned as
+ marshalled data. For backwards compatibility, a dispatch
+ function can be provided as an argument (see comment in
+ SimpleXMLRPCRequestHandler.do_POST) but overriding the
+        existing method through subclassing is the preferred means
+ of changing method dispatch behavior.
+ """
+
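+        # For example (illustrative), the wire format this accepts is what
+        # xmlrpclib produces: xmlrpclib.dumps((2, 3), 'add') yields the
+        # request XML for add(2, 3).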
+ try:
+ params, method = xmlrpclib.loads(data)
+
+ # generate response
+ if dispatch_method is not None:
+ response = dispatch_method(method, params)
+ else:
+ response = self._dispatch(method, params)
+ # wrap response in a singleton tuple
+ response = (response,)
+ response = xmlrpclib.dumps(response, methodresponse=1,
+ allow_none=self.allow_none, encoding=self.encoding)
+ except Fault, fault:
+ response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
+ encoding=self.encoding)
+ except:
+ # report exception back to server
+ response = xmlrpclib.dumps(
+ xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)),
+ encoding=self.encoding, allow_none=self.allow_none,
+ )
+
+ return response
+
+ def system_listMethods(self):
+ """system.listMethods() => ['add', 'subtract', 'multiple']
+
+ Returns a list of the methods supported by the server."""
+
+ methods = self.funcs.keys()
+ if self.instance is not None:
+            # Instance can implement _listMethods to return a list of
+ # methods
+ if hasattr(self.instance, '_listMethods'):
+ methods = remove_duplicates(
+ methods + self.instance._listMethods()
+ )
+ # if the instance has a _dispatch method then we
+ # don't have enough information to provide a list
+ # of methods
+ elif not hasattr(self.instance, '_dispatch'):
+ methods = remove_duplicates(
+ methods + list_public_methods(self.instance)
+ )
+ methods.sort()
+ return methods
+
+ def system_methodSignature(self, method_name):
+ """system.methodSignature('add') => [double, int, int]
+
+ Returns a list describing the signature of the method. In the
+ above example, the add method takes two integers as arguments
+ and returns a double result.
+
+ This server does NOT support system.methodSignature."""
+
+ # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
+
+ return 'signatures not supported'
+
+ def system_methodHelp(self, method_name):
+ """system.methodHelp('add') => "Adds two integers together"
+
+ Returns a string containing documentation for the specified method."""
+
+ method = None
+ if self.funcs.has_key(method_name):
+ method = self.funcs[method_name]
+ elif self.instance is not None:
+ # Instance can implement _methodHelp to return help for a method
+ if hasattr(self.instance, '_methodHelp'):
+ return self.instance._methodHelp(method_name)
+ # if the instance has a _dispatch method then we
+ # don't have enough information to provide help
+ elif not hasattr(self.instance, '_dispatch'):
+ try:
+ method = resolve_dotted_attribute(
+ self.instance,
+ method_name,
+ self.allow_dotted_names
+ )
+ except AttributeError:
+ pass
+
+        # Note that we aren't checking that the method is actually
+        # a callable object of some kind
+ if method is None:
+ return ""
+ else:
+ import pydoc
+ return pydoc.getdoc(method)
+
+ def system_multicall(self, call_list):
+ """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
+[[4], ...]
+
+ Allows the caller to package multiple XML-RPC calls into a single
+ request.
+
+ See http://www.xmlrpc.com/discuss/msgReader$1208
+ """
+
+ results = []
+ for call in call_list:
+ method_name = call['methodName']
+ params = call['params']
+
+ try:
+ # XXX A marshalling error in any response will fail the entire
+ # multicall. If someone cares they should fix this.
+ results.append([self._dispatch(method_name, params)])
+ except Fault, fault:
+ results.append(
+ {'faultCode' : fault.faultCode,
+ 'faultString' : fault.faultString}
+ )
+ except:
+ results.append(
+ {'faultCode' : 1,
+ 'faultString' : "%s:%s" % (sys.exc_type, sys.exc_value)}
+ )
+ return results
+
+ def _dispatch(self, method, params):
+ """Dispatches the XML-RPC method.
+
+ XML-RPC calls are forwarded to a registered function that
+ matches the called XML-RPC method name. If no such function
+ exists then the call is forwarded to the registered instance,
+ if available.
+
+ If the registered instance has a _dispatch method then that
+ method will be called with the name of the XML-RPC method and
+ its parameters as a tuple
+ e.g. instance._dispatch('add',(2,3))
+
+ If the registered instance does not have a _dispatch method
+ then the instance will be searched to find a matching method
+ and, if found, will be called.
+
+ Methods beginning with an '_' are considered private and will
+ not be called.
+ """
+
+ func = None
+ try:
+ # check to see if a matching function has been registered
+ func = self.funcs[method]
+ except KeyError:
+ if self.instance is not None:
+ # check for a _dispatch method
+ if hasattr(self.instance, '_dispatch'):
+ return self.instance._dispatch(method, params)
+ else:
+ # call instance method directly
+ try:
+ func = resolve_dotted_attribute(
+ self.instance,
+ method,
+ self.allow_dotted_names
+ )
+ except AttributeError:
+ pass
+
+ if func is not None:
+ return func(*params)
+ else:
+ raise Exception('method "%s" is not supported' % method)
+
+class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """Simple XML-RPC request handler class.
+
+ Handles all HTTP POST requests and attempts to decode them as
+ XML-RPC requests.
+ """
+
+ # Class attribute listing the accessible path components;
+ # paths not on this list will result in a 404 error.
+ rpc_paths = ('/', '/RPC2')
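+    # For example (illustrative), a subclass can narrow the accepted
+    # paths to a single endpoint:
+    #
+    #     class RestrictedHandler(SimpleXMLRPCRequestHandler):
+    #         rpc_paths = ('/xmlrpc',)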
+
+ def is_rpc_path_valid(self):
+ if self.rpc_paths:
+ return self.path in self.rpc_paths
+ else:
+ # If .rpc_paths is empty, just assume all paths are legal
+ return True
+
+ def do_POST(self):
+ """Handles the HTTP POST request.
+
+ Attempts to interpret all HTTP POST requests as XML-RPC calls,
+ which are forwarded to the server's _dispatch method for handling.
+ """
+
+ # Check that the path is legal
+ if not self.is_rpc_path_valid():
+ self.report_404()
+ return
+
+ try:
+ # Get arguments by reading body of request.
+ # We read this in chunks to avoid straining
+ # socket.read(); around the 10 or 15Mb mark, some platforms
+ # begin to have problems (bug #792570).
+ max_chunk_size = 10*1024*1024
+ size_remaining = int(self.headers["content-length"])
+ L = []
+ while size_remaining:
+ chunk_size = min(size_remaining, max_chunk_size)
+ L.append(self.rfile.read(chunk_size))
+ size_remaining -= len(L[-1])
+ data = ''.join(L)
+
+ # In previous versions of SimpleXMLRPCServer, _dispatch
+ # could be overridden in this class, instead of in
+ # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
+ # check to see if a subclass implements _dispatch and dispatch
+ # using that method if present.
+ response = self.server._marshaled_dispatch(
+ data, getattr(self, '_dispatch', None)
+ )
+ except: # This should only happen if the module is buggy
+ # internal error, report as HTTP server error
+ self.send_response(500)
+ self.end_headers()
+ else:
+ # got a valid XML RPC response
+ self.send_response(200)
+ self.send_header("Content-type", "text/xml")
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ self.wfile.write(response)
+
+ # shut down the connection
+ self.wfile.flush()
+ self.connection.shutdown(1)
+
+    def report_404(self):
+ # Report a 404 error
+ self.send_response(404)
+ response = 'No such page'
+ self.send_header("Content-type", "text/plain")
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ self.wfile.write(response)
+ # shut down the connection
+ self.wfile.flush()
+ self.connection.shutdown(1)
+
+ def log_request(self, code='-', size='-'):
+ """Selectively log an accepted request."""
+
+ if self.server.logRequests:
+ BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
+
+class SimpleXMLRPCServer(SocketServer.TCPServer,
+ SimpleXMLRPCDispatcher):
+ """Simple XML-RPC server.
+
+ Simple XML-RPC server that allows functions and a single instance
+ to be installed to handle requests. The default implementation
+ attempts to dispatch XML-RPC calls to the functions or instance
+    installed in the server. Override the _dispatch method inherited
+ from SimpleXMLRPCDispatcher to change this behavior.
+ """
+
+ allow_reuse_address = True
+
+ def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
+ logRequests=True, allow_none=False, encoding=None):
+ self.logRequests = logRequests
+
+ SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
+ SocketServer.TCPServer.__init__(self, addr, requestHandler)
+
+ # [Bug #1222790] If possible, set close-on-exec flag; if a
+ # method spawns a subprocess, the subprocess shouldn't have
+ # the listening socket open.
+ if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
+ flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
+ flags |= fcntl.FD_CLOEXEC
+ fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
+
+class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
+ """Simple handler for XML-RPC data passed through CGI."""
+
+ def __init__(self, allow_none=False, encoding=None):
+ SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
+
+ def handle_xmlrpc(self, request_text):
+ """Handle a single XML-RPC request"""
+
+ response = self._marshaled_dispatch(request_text)
+
+ print 'Content-Type: text/xml'
+ print 'Content-Length: %d' % len(response)
+ sys.stdout.write(response)
+
+ def handle_get(self):
+ """Handle a single HTTP GET request.
+
+ Default implementation indicates an error because
+ XML-RPC uses the POST method.
+ """
+
+ code = 400
+ message, explain = \
+ BaseHTTPServer.BaseHTTPRequestHandler.responses[code]
+
+ response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
+ {
+ 'code' : code,
+ 'message' : message,
+ 'explain' : explain
+ }
+ print 'Status: %d %s' % (code, message)
+ print 'Content-Type: text/html'
+ print 'Content-Length: %d' % len(response)
+ sys.stdout.write(response)
+
+ def handle_request(self, request_text = None):
+ """Handle a single XML-RPC request passed through a CGI post method.
+
+ If no XML data is given then it is read from stdin. The resulting
+ XML-RPC response is printed to stdout along with the correct HTTP
+ headers.
+ """
+
+ if request_text is None and \
+ os.environ.get('REQUEST_METHOD', None) == 'GET':
+ self.handle_get()
+ else:
+ # POST data is normally available through stdin
+ if request_text is None:
+ request_text = sys.stdin.read()
+
+ self.handle_xmlrpc(request_text)
+
+if __name__ == '__main__':
+ print 'Running XML-RPC server on port 8000'
+ server = SimpleXMLRPCServer(("localhost", 8000))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.serve_forever()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/SocketServer.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,588 @@
+"""Generic socket server classes.
+
+This module tries to capture the various aspects of defining a server:
+
+For socket-based servers:
+
+- address family:
+ - AF_INET{,6}: IP (Internet Protocol) sockets (default)
+ - AF_UNIX: Unix domain sockets
+  - others, e.g. AF_DECNET are conceivable (see <socket.h>)
+- socket type:
+ - SOCK_STREAM (reliable stream, e.g. TCP)
+ - SOCK_DGRAM (datagrams, e.g. UDP)
+
+For request-based servers (including socket-based):
+
+- client address verification before further looking at the request
+ (This is actually a hook for any processing that needs to look
+ at the request before anything else, e.g. logging)
+- how to handle multiple requests:
+ - synchronous (one request is handled at a time)
+ - forking (each request is handled by a new process)
+ - threading (each request is handled by a new thread)
+
+The classes in this module favor the server type that is simplest to
+write: a synchronous TCP/IP server. This is bad class design, but
+saves some typing. (There's also the issue that a deep class hierarchy
+slows down method lookups.)
+
+There are five classes in an inheritance diagram, four of which represent
+synchronous servers of four types:
+
+ +------------+
+ | BaseServer |
+ +------------+
+ |
+ v
+ +-----------+ +------------------+
+ | TCPServer |------->| UnixStreamServer |
+ +-----------+ +------------------+
+ |
+ v
+ +-----------+ +--------------------+
+ | UDPServer |------->| UnixDatagramServer |
+ +-----------+ +--------------------+
+
+Note that UnixDatagramServer derives from UDPServer, not from
+UnixStreamServer -- the only difference between an IP and a Unix
+stream server is the address family, which is simply repeated in both
+unix server classes.
+
+Forking and threading versions of each type of server can be created
+using the ForkingMixIn and ThreadingMixIn mix-in classes. For
+instance, a threading UDP server class is created as follows:
+
+ class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+
+The Mix-in class must come first, since it overrides a method defined
+in UDPServer! Setting the various member variables also changes
+the behavior of the underlying server mechanism.
+
+To implement a service, you must derive a class from
+BaseRequestHandler and redefine its handle() method. You can then run
+various versions of the service by combining one of the server classes
+with your request handler class.
+
+The request handler class must be different for datagram or stream
+services. This can be hidden by using the request handler
+subclasses StreamRequestHandler or DatagramRequestHandler.
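+
+For instance, a minimal stream service that echoes lines back to the
+client might look like this (an illustrative sketch; the handler class
+and port are made up):
+
+    class EchoHandler(StreamRequestHandler):
+        def handle(self):
+            # rfile/wfile are set up by StreamRequestHandler.setup()
+            line = self.rfile.readline()
+            while line:
+                self.wfile.write(line)
+                line = self.rfile.readline()
+
+    server = TCPServer(("localhost", 9999), EchoHandler)
+    server.serve_forever()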
+
+Of course, you still have to use your head!
+
+For instance, it makes no sense to use a forking server if the service
+contains state in memory that can be modified by requests (since the
+modifications in the child process would never reach the initial state
+kept in the parent process and passed to each child). In this case,
+you can use a threading server, but you will probably have to use
+locks to keep two nearly simultaneous requests from applying
+conflicting changes to the server state.
+
+On the other hand, if you are building e.g. an HTTP server, where all
+data is stored externally (e.g. in the file system), a synchronous
+class will essentially render the service "deaf" while one request is
+being handled -- which may be for a very long time if a client is slow
+to read all the data it has requested. Here a threading or forking
+server is appropriate.
+
+In some cases, it may be appropriate to process part of a request
+synchronously, but to finish processing in a forked child depending on
+the request data. This can be implemented by using a synchronous
+server and doing an explicit fork in the request handler class
+handle() method.
+
+Another approach to handling multiple simultaneous requests in an
+environment that supports neither threads nor fork (or where these are
+too expensive or inappropriate for the service) is to maintain an
+explicit table of partially finished requests and to use select() to
+decide which request to work on next (or whether to handle a new
+incoming request). This is particularly important for stream services
+where each client can potentially be connected for a long time (if
+threads or subprocesses cannot be used).
+
+Future work:
+- Standard classes for Sun RPC (which uses either UDP or TCP)
+- Standard mix-in classes to implement various authentication
+ and encryption schemes
+- Standard framework for select-based multiplexing
+
+XXX Open problems:
+- What to do with out-of-band data?
+
+BaseServer:
+- split generic "request" functionality out into BaseServer class.
+ Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
+
+ example: read entries from a SQL database (requires overriding
+ get_request() to return a table entry from the database).
+ entry is processed by a RequestHandlerClass.
+
+"""
+
+# Author of the BaseServer patch: Luke Kenneth Casson Leighton
+
+# XXX Warning!
+# There is a test suite for this module, but it cannot be run by the
+# standard regression test.
+# To run it manually, run Lib/test/test_socketserver.py.
+
+__version__ = "0.4"
+
+
+import socket
+import sys
+import os
+
+__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
+ "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
+ "StreamRequestHandler","DatagramRequestHandler",
+ "ThreadingMixIn", "ForkingMixIn"]
+if hasattr(socket, "AF_UNIX"):
+ __all__.extend(["UnixStreamServer","UnixDatagramServer",
+ "ThreadingUnixStreamServer",
+ "ThreadingUnixDatagramServer"])
+
+class BaseServer:
+
+ """Base class for server classes.
+
+ Methods for the caller:
+
+ - __init__(server_address, RequestHandlerClass)
+ - serve_forever()
+ - handle_request() # if you do not use serve_forever()
+ - fileno() -> int # for select()
+
+ Methods that may be overridden:
+
+ - server_bind()
+ - server_activate()
+ - get_request() -> request, client_address
+ - verify_request(request, client_address)
+ - server_close()
+ - process_request(request, client_address)
+ - close_request(request)
+ - handle_error()
+
+ Methods for derived classes:
+
+ - finish_request(request, client_address)
+
+ Class variables that may be overridden by derived classes or
+ instances:
+
+ - address_family
+ - socket_type
+ - allow_reuse_address
+
+ Instance variables:
+
+ - RequestHandlerClass
+ - socket
+
+ """
+
+ def __init__(self, server_address, RequestHandlerClass):
+ """Constructor. May be extended, do not override."""
+ self.server_address = server_address
+ self.RequestHandlerClass = RequestHandlerClass
+
+ def server_activate(self):
+ """Called by constructor to activate the server.
+
+ May be overridden.
+
+ """
+ pass
+
+ def serve_forever(self):
+ """Handle one request at a time until doomsday."""
+ while 1:
+ self.handle_request()
+
+ # The distinction between handling, getting, processing and
+ # finishing a request is fairly arbitrary. Remember:
+ #
+ # - handle_request() is the top-level call. It calls
+ # get_request(), verify_request() and process_request()
+ # - get_request() is different for stream or datagram sockets
+ # - process_request() is the place that may fork a new process
+ # or create a new thread to finish the request
+ # - finish_request() instantiates the request handler class;
+ # this constructor will handle the request all by itself
+
+ def handle_request(self):
+ """Handle one request, possibly blocking."""
+ try:
+ request, client_address = self.get_request()
+ except socket.error:
+ return
+ if self.verify_request(request, client_address):
+ try:
+ self.process_request(request, client_address)
+ except:
+ self.handle_error(request, client_address)
+ self.close_request(request)
+
+ def verify_request(self, request, client_address):
+ """Verify the request. May be overridden.
+
+ Return True if we should proceed with this request.
+
+ """
+ return True
+
+ def process_request(self, request, client_address):
+ """Call finish_request.
+
+ Overridden by ForkingMixIn and ThreadingMixIn.
+
+ """
+ self.finish_request(request, client_address)
+ self.close_request(request)
+
+ def server_close(self):
+ """Called to clean-up the server.
+
+ May be overridden.
+
+ """
+ pass
+
+ def finish_request(self, request, client_address):
+ """Finish one request by instantiating RequestHandlerClass."""
+ self.RequestHandlerClass(request, client_address, self)
+
+ def close_request(self, request):
+ """Called to clean up an individual request."""
+ pass
+
+ def handle_error(self, request, client_address):
+ """Handle an error gracefully. May be overridden.
+
+ The default is to print a traceback and continue.
+
+ """
+ print '-'*40
+ print 'Exception happened during processing of request from',
+ print client_address
+ import traceback
+ traceback.print_exc() # XXX But this goes to stderr!
+ print '-'*40
+
+
+class TCPServer(BaseServer):
+
+ """Base class for various socket-based server classes.
+
+ Defaults to synchronous IP stream (i.e., TCP).
+
+ Methods for the caller:
+
+ - __init__(server_address, RequestHandlerClass)
+ - serve_forever()
+ - handle_request() # if you don't use serve_forever()
+ - fileno() -> int # for select()
+
+ Methods that may be overridden:
+
+ - server_bind()
+ - server_activate()
+ - get_request() -> request, client_address
+ - verify_request(request, client_address)
+ - process_request(request, client_address)
+ - close_request(request)
+ - handle_error()
+
+ Methods for derived classes:
+
+ - finish_request(request, client_address)
+
+ Class variables that may be overridden by derived classes or
+ instances:
+
+ - address_family
+ - socket_type
+ - request_queue_size (only for stream sockets)
+ - allow_reuse_address
+
+ Instance variables:
+
+ - server_address
+ - RequestHandlerClass
+ - socket
+
+ """
+
+ address_family = socket.AF_INET
+
+ socket_type = socket.SOCK_STREAM
+
+ request_queue_size = 5
+
+ allow_reuse_address = False
+
+ def __init__(self, server_address, RequestHandlerClass):
+ """Constructor. May be extended, do not override."""
+ BaseServer.__init__(self, server_address, RequestHandlerClass)
+ self.socket = socket.socket(self.address_family,
+ self.socket_type)
+ self.server_bind()
+ self.server_activate()
+
+ def server_bind(self):
+ """Called by constructor to bind the socket.
+
+ May be overridden.
+
+ """
+ if self.allow_reuse_address:
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.socket.bind(self.server_address)
+ self.server_address = self.socket.getsockname()
+
+ def server_activate(self):
+ """Called by constructor to activate the server.
+
+ May be overridden.
+
+ """
+ self.socket.listen(self.request_queue_size)
+
+ def server_close(self):
+ """Called to clean-up the server.
+
+ May be overridden.
+
+ """
+ self.socket.close()
+
+ def fileno(self):
+ """Return socket file number.
+
+ Interface required by select().
+
+ """
+ return self.socket.fileno()
+
+ def get_request(self):
+ """Get the request and client address from the socket.
+
+ May be overridden.
+
+ """
+ return self.socket.accept()
+
+ def close_request(self, request):
+ """Called to clean up an individual request."""
+ request.close()
+
+
+class UDPServer(TCPServer):
+
+ """UDP server class."""
+
+ allow_reuse_address = False
+
+ socket_type = socket.SOCK_DGRAM
+
+ max_packet_size = 8192
+
+ def get_request(self):
+ data, client_addr = self.socket.recvfrom(self.max_packet_size)
+ return (data, self.socket), client_addr
+
+ def server_activate(self):
+ # No need to call listen() for UDP.
+ pass
+
+ def close_request(self, request):
+ # No need to close anything.
+ pass
+
+class ForkingMixIn:
+
+ """Mix-in class to handle each request in a new process."""
+
+ active_children = None
+ max_children = 40
+
+ def collect_children(self):
+ """Internal routine to wait for died children."""
+ while self.active_children:
+ if len(self.active_children) < self.max_children:
+ options = os.WNOHANG
+ else:
+ # If the maximum number of children are already
+ # running, block while waiting for a child to exit
+ options = 0
+ try:
+ pid, status = os.waitpid(0, options)
+ except os.error:
+ pid = None
+ if not pid: break
+ self.active_children.remove(pid)
+
+ def process_request(self, request, client_address):
+ """Fork a new subprocess to process the request."""
+ self.collect_children()
+ pid = os.fork()
+ if pid:
+ # Parent process
+ if self.active_children is None:
+ self.active_children = []
+ self.active_children.append(pid)
+ self.close_request(request)
+ return
+ else:
+ # Child process.
+ # This must never return, hence os._exit()!
+ try:
+ self.finish_request(request, client_address)
+ os._exit(0)
+ except:
+ try:
+ self.handle_error(request, client_address)
+ finally:
+ os._exit(1)
+
+
+class ThreadingMixIn:
+ """Mix-in class to handle each request in a new thread."""
+
+ # Decides how threads will act upon termination of the
+ # main process
+ daemon_threads = False
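+    # For example (illustrative; the class name is made up), a server
+    # whose handler threads die along with the main process:
+    #
+    #     class MyServer(ThreadingMixIn, TCPServer):
+    #         daemon_threads = True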
+
+ def process_request_thread(self, request, client_address):
+ """Same as in BaseServer but as a thread.
+
+ In addition, exception handling is done here.
+
+ """
+ try:
+ self.finish_request(request, client_address)
+ self.close_request(request)
+ except:
+ self.handle_error(request, client_address)
+ self.close_request(request)
+
+ def process_request(self, request, client_address):
+ """Start a new thread to process the request."""
+ import threading
+ t = threading.Thread(target = self.process_request_thread,
+ args = (request, client_address))
+ if self.daemon_threads:
+            t.setDaemon(1)
+ t.start()
+
+
+class ForkingUDPServer(ForkingMixIn, UDPServer): pass
+class ForkingTCPServer(ForkingMixIn, TCPServer): pass
+
+class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
+
+if hasattr(socket, 'AF_UNIX'):
+
+ class UnixStreamServer(TCPServer):
+ address_family = socket.AF_UNIX
+
+ class UnixDatagramServer(UDPServer):
+ address_family = socket.AF_UNIX
+
+ class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
+
+ class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
+
+class BaseRequestHandler:
+
+ """Base class for request handler classes.
+
+ This class is instantiated for each request to be handled. The
+ constructor sets the instance variables request, client_address
+ and server, and then calls the handle() method. To implement a
+ specific service, all you need to do is to derive a class which
+ defines a handle() method.
+
+ The handle() method can find the request as self.request, the
+ client address as self.client_address, and the server (in case it
+ needs access to per-server information) as self.server. Since a
+ separate instance is created for each request, the handle() method
+    can define arbitrary other instance variables.
+
+ """
+
+ def __init__(self, request, client_address, server):
+ self.request = request
+ self.client_address = client_address
+ self.server = server
+ try:
+ self.setup()
+ self.handle()
+ self.finish()
+ finally:
+ sys.exc_traceback = None # Help garbage collection
+
+ def setup(self):
+ pass
+
+ def handle(self):
+ pass
+
+ def finish(self):
+ pass
+
+
+# The following two classes make it possible to use the same service
+# class for stream or datagram servers.
+# Each class sets up these instance variables:
+# - rfile: a file object from which the request is read
+# - wfile: a file object to which the reply is written
+# When the handle() method returns, wfile is flushed properly
+
+
+class StreamRequestHandler(BaseRequestHandler):
+
+ """Define self.rfile and self.wfile for stream sockets."""
+
+ # Default buffer sizes for rfile, wfile.
+ # We default rfile to buffered because otherwise it could be
+ # really slow for large data (a getc() call per byte); we make
+ # wfile unbuffered because (a) often after a write() we want to
+ # read and we need to flush the line; (b) big writes to unbuffered
+ # files are typically optimized by stdio even when big reads
+ # aren't.
+ rbufsize = -1
+ wbufsize = 0
+
+ def setup(self):
+ self.connection = self.request
+ self.rfile = self.connection.makefile('rb', self.rbufsize)
+ self.wfile = self.connection.makefile('wb', self.wbufsize)
+
+ def finish(self):
+ if not self.wfile.closed:
+ self.wfile.flush()
+ self.wfile.close()
+ self.rfile.close()
+
+
+class DatagramRequestHandler(BaseRequestHandler):
+
+ # XXX Regrettably, I cannot get this working on Linux;
+ # s.recvfrom() doesn't return a meaningful client address.
+
+ """Define self.rfile and self.wfile for datagram sockets."""
+
+ def setup(self):
+ try:
+ from cStringIO import StringIO
+ except ImportError:
+ from StringIO import StringIO
+ self.packet, self.socket = self.request
+ self.rfile = StringIO(self.packet)
+ self.wfile = StringIO()
+
+ def finish(self):
+ self.socket.sendto(self.wfile.getvalue(), self.client_address)
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/StringIO.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,323 @@
+r"""File-like objects that read from or write to a string buffer.
+
+This implements (nearly) all stdio methods.
+
+f = StringIO() # ready for writing
+f = StringIO(buf) # ready for reading
+f.close() # explicitly release resources held
+flag = f.isatty() # always false
+pos = f.tell() # get current position
+f.seek(pos) # set current position
+f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
+buf = f.read() # read until EOF
+buf = f.read(n) # read up to n bytes
+buf = f.readline() # read until end of line ('\n') or EOF
+list = f.readlines()# list of f.readline() results until EOF
+f.truncate([size]) # truncate file to at most size (default: current pos)
+f.write(buf) # write at current position
+f.writelines(list) # for line in list: f.write(line)
+f.getvalue() # return whole file's contents as a string
+
+Notes:
+- Using a real file is often faster (but less convenient).
+- There's also a much faster implementation in C, called cStringIO, but
+ it's not subclassable.
+- fileno() is left unimplemented so that code which uses it triggers
+ an exception early.
+- Seeking far beyond EOF and then writing will insert real null
+ bytes that occupy space in the buffer.
+- There's a simple test set (see end of this file).
+"""
+try:
+ from errno import EINVAL
+except ImportError:
+ EINVAL = 22
+
+__all__ = ["StringIO"]
+
+def _complain_ifclosed(closed):
+ if closed:
+ raise ValueError, "I/O operation on closed file"
+
+class StringIO:
+ """class StringIO([buffer])
+
+ When a StringIO object is created, it can be initialized to an existing
+ string by passing the string to the constructor. If no string is given,
+ the StringIO will start empty.
+
+ The StringIO object can accept either Unicode or 8-bit strings, but
+ mixing the two may take some care. If both are used, 8-bit strings that
+ cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
+ a UnicodeError to be raised when getvalue() is called.
+ """
+ def __init__(self, buf = ''):
+ # Force self.buf to be a string or unicode
+ if not isinstance(buf, basestring):
+ buf = str(buf)
+ self.buf = buf
+ self.len = len(buf)
+ self.buflist = []
+ self.pos = 0
+ self.closed = False
+ self.softspace = 0
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ """A file object is its own iterator, for example iter(f) returns f
+ (unless f is closed). When a file is used as an iterator, typically
+ in a for loop (for example, for line in f: print line), the next()
+ method is called repeatedly. This method returns the next input line,
+ or raises StopIteration when EOF is hit.
+ """
+ _complain_ifclosed(self.closed)
+ r = self.readline()
+ if not r:
+ raise StopIteration
+ return r
+
+ def close(self):
+ """Free the memory buffer.
+ """
+ if not self.closed:
+ self.closed = True
+ del self.buf, self.pos
+
+ def isatty(self):
+ """Returns False because StringIO objects are not connected to a
+ tty-like device.
+ """
+ _complain_ifclosed(self.closed)
+ return False
+
+ def seek(self, pos, mode = 0):
+ """Set the file's current position.
+
+ The mode argument is optional and defaults to 0 (absolute file
+ positioning); other values are 1 (seek relative to the current
+ position) and 2 (seek relative to the file's end).
+
+ There is no return value.
+ """
+ _complain_ifclosed(self.closed)
+ if self.buflist:
+ self.buf += ''.join(self.buflist)
+ self.buflist = []
+ if mode == 1:
+ pos += self.pos
+ elif mode == 2:
+ pos += self.len
+ self.pos = max(0, pos)
+
+ def tell(self):
+ """Return the file's current position."""
+ _complain_ifclosed(self.closed)
+ return self.pos
+
+ def read(self, n = -1):
+ """Read at most size bytes from the file
+ (less if the read hits EOF before obtaining size bytes).
+
+ If the size argument is negative or omitted, read all data until EOF
+ is reached. The bytes are returned as a string object. An empty
+ string is returned when EOF is encountered immediately.
+ """
+ _complain_ifclosed(self.closed)
+ if self.buflist:
+ self.buf += ''.join(self.buflist)
+ self.buflist = []
+ if n < 0:
+ newpos = self.len
+ else:
+ newpos = min(self.pos+n, self.len)
+ r = self.buf[self.pos:newpos]
+ self.pos = newpos
+ return r
+
+ def readline(self, length=None):
+ r"""Read one entire line from the file.
+
+ A trailing newline character is kept in the string (but may be absent
+        when a file ends with an incomplete line). If the length argument is
+ present and non-negative, it is a maximum byte count (including the
+ trailing newline) and an incomplete line may be returned.
+
+ An empty string is returned only when EOF is encountered immediately.
+
+ Note: Unlike stdio's fgets(), the returned string contains null
+ characters ('\0') if they occurred in the input.
+ """
+ _complain_ifclosed(self.closed)
+ if self.buflist:
+ self.buf += ''.join(self.buflist)
+ self.buflist = []
+ i = self.buf.find('\n', self.pos)
+ if i < 0:
+ newpos = self.len
+ else:
+ newpos = i+1
+ if length is not None:
+ if self.pos + length < newpos:
+ newpos = self.pos + length
+ r = self.buf[self.pos:newpos]
+ self.pos = newpos
+ return r
+
+ def readlines(self, sizehint = 0):
+ """Read until EOF using readline() and return a list containing the
+ lines thus read.
+
+ If the optional sizehint argument is present, instead of reading up
+        to EOF, whole lines totalling approximately sizehint bytes (or more,
+        to accommodate a final whole line) are read.
+ """
+ total = 0
+ lines = []
+ line = self.readline()
+ while line:
+ lines.append(line)
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline()
+ return lines
+
+ def truncate(self, size=None):
+ """Truncate the file's size.
+
+ If the optional size argument is present, the file is truncated to
+ (at most) that size. The size defaults to the current position.
+ The current file position is not changed unless the position
+ is beyond the new file size.
+
+ If the specified size exceeds the file's current size, the
+ file remains unchanged.
+ """
+ _complain_ifclosed(self.closed)
+ if size is None:
+ size = self.pos
+ elif size < 0:
+ raise IOError(EINVAL, "Negative size not allowed")
+ elif size < self.pos:
+ self.pos = size
+        self.buf = self.getvalue()[:size]
+        # recompute from the data actually kept: size may exceed the
+        # current length, in which case the buffer is unchanged
+        self.len = len(self.buf)
+
+ def write(self, s):
+ """Write a string to the file.
+
+ There is no return value.
+ """
+ _complain_ifclosed(self.closed)
+ if not s: return
+ # Force s to be a string or unicode
+ if not isinstance(s, basestring):
+ s = str(s)
+ spos = self.pos
+ slen = self.len
+ if spos == slen:
+ self.buflist.append(s)
+ self.len = self.pos = spos + len(s)
+ return
+ if spos > slen:
+ self.buflist.append('\0'*(spos - slen))
+ slen = spos
+ newpos = spos + len(s)
+ if spos < slen:
+ if self.buflist:
+ self.buf += ''.join(self.buflist)
+ self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
+ self.buf = ''
+ if newpos > slen:
+ slen = newpos
+ else:
+ self.buflist.append(s)
+ slen = newpos
+ self.len = slen
+ self.pos = newpos
+
+ def writelines(self, iterable):
+ """Write a sequence of strings to the file. The sequence can be any
+ iterable object producing strings, typically a list of strings. There
+ is no return value.
+
+ (The name is intended to match readlines(); writelines() does not add
+ line separators.)
+ """
+ write = self.write
+ for line in iterable:
+ write(line)
+
+ def flush(self):
+ """Flush the internal buffer
+ """
+ _complain_ifclosed(self.closed)
+
+ def getvalue(self):
+ """
+ Retrieve the entire contents of the "file" at any time before
+ the StringIO object's close() method is called.
+
+ The StringIO object can accept either Unicode or 8-bit strings,
+ but mixing the two may take some care. If both are used, 8-bit
+ strings that cannot be interpreted as 7-bit ASCII (that use the
+ 8th bit) will cause a UnicodeError to be raised when getvalue()
+ is called.
+ """
+ if self.buflist:
+ self.buf += ''.join(self.buflist)
+ self.buflist = []
+ return self.buf
+
+
+# A little test suite
+
+def test():
+ import sys
+ if sys.argv[1:]:
+ file = sys.argv[1]
+ else:
+ file = '/etc/passwd'
+ lines = open(file, 'r').readlines()
+ text = open(file, 'r').read()
+ f = StringIO()
+ for line in lines[:-2]:
+ f.write(line)
+ f.writelines(lines[-2:])
+ if f.getvalue() != text:
+ raise RuntimeError, 'write failed'
+ length = f.tell()
+ print 'File length =', length
+ f.seek(len(lines[0]))
+ f.write(lines[1])
+ f.seek(0)
+ print 'First line =', repr(f.readline())
+ print 'Position =', f.tell()
+ line = f.readline()
+ print 'Second line =', repr(line)
+ f.seek(-len(line), 1)
+ line2 = f.read(len(line))
+ if line != line2:
+ raise RuntimeError, 'bad result after seek back'
+ f.seek(len(line2), 1)
+ list = f.readlines()
+ line = list[-1]
+ f.seek(f.tell() - len(line))
+ line2 = f.read()
+ if line != line2:
+ raise RuntimeError, 'bad result after seek back from EOF'
+ print 'Read', len(list), 'more lines'
+ print 'File length =', f.tell()
+ if f.tell() != length:
+ raise RuntimeError, 'bad length'
+ f.truncate(length/2)
+ f.seek(0, 2)
+ print 'Truncated length =', f.tell()
+ if f.tell() != length/2:
+ raise RuntimeError, 'truncate did not adjust length'
+ f.close()
+
+if __name__ == '__main__':
+ test()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/UserDict.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,175 @@
+"""A more or less complete user-defined wrapper around dictionary objects."""
+
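+# For example (illustrative), a subclass only overrides what it wants to
+# change; here, a dictionary with case-insensitive string keys:
+#
+#     class LowerDict(UserDict):
+#         def __setitem__(self, key, item): self.data[key.lower()] = item
+#         def __getitem__(self, key): return self.data[key.lower()]
+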
+class UserDict:
+ def __init__(self, dict=None, **kwargs):
+ self.data = {}
+ if dict is not None:
+ self.update(dict)
+ if len(kwargs):
+ self.update(kwargs)
+ def __repr__(self): return repr(self.data)
+ def __cmp__(self, dict):
+ if isinstance(dict, UserDict):
+ return cmp(self.data, dict.data)
+ else:
+ return cmp(self.data, dict)
+ def __len__(self): return len(self.data)
+ def __getitem__(self, key):
+ if key in self.data:
+ return self.data[key]
+ if hasattr(self.__class__, "__missing__"):
+ return self.__class__.__missing__(self, key)
+ raise KeyError(key)
+ def __setitem__(self, key, item): self.data[key] = item
+ def __delitem__(self, key): del self.data[key]
+ def clear(self): self.data.clear()
+ def copy(self):
+ if self.__class__ is UserDict:
+ return UserDict(self.data.copy())
+ import copy
+ data = self.data
+ try:
+ self.data = {}
+ c = copy.copy(self)
+ finally:
+ self.data = data
+ c.update(self)
+ return c
+ def keys(self): return self.data.keys()
+ def items(self): return self.data.items()
+ def iteritems(self): return self.data.iteritems()
+ def iterkeys(self): return self.data.iterkeys()
+ def itervalues(self): return self.data.itervalues()
+ def values(self): return self.data.values()
+ def has_key(self, key): return self.data.has_key(key)
+ def update(self, dict=None, **kwargs):
+ if dict is None:
+ pass
+ elif isinstance(dict, UserDict):
+ self.data.update(dict.data)
+ elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
+ self.data.update(dict)
+ else:
+ for k, v in dict.items():
+ self[k] = v
+ if len(kwargs):
+ self.data.update(kwargs)
+ def get(self, key, failobj=None):
+ if not self.has_key(key):
+ return failobj
+ return self[key]
+ def setdefault(self, key, failobj=None):
+ if not self.has_key(key):
+ self[key] = failobj
+ return self[key]
+ def pop(self, key, *args):
+ return self.data.pop(key, *args)
+ def popitem(self):
+ return self.data.popitem()
+ def __contains__(self, key):
+ return key in self.data
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+class IterableUserDict(UserDict):
+ def __iter__(self):
+ return iter(self.data)
+
+class DictMixin:
+ # Mixin defining all dictionary methods for classes that already have
+ # a minimum dictionary interface including getitem, setitem, delitem,
+ # and keys. Without knowledge of the subclass constructor, the mixin
+ # does not define __init__() or copy(). In addition to the four base
+ # methods, progressively more efficiency comes with defining
+ # __contains__(), __iter__(), and iteritems().
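+    #
+    # For example (illustrative), a minimal mapping supplying only the
+    # four base methods:
+    #
+    #     class SeqDict(DictMixin):
+    #         def __init__(self):
+    #             self.keylist, self.valuelist = [], []
+    #         def __getitem__(self, key):
+    #             try:
+    #                 return self.valuelist[self.keylist.index(key)]
+    #             except ValueError:
+    #                 raise KeyError(key)
+    #         def __setitem__(self, key, value):
+    #             if key in self.keylist:
+    #                 self.valuelist[self.keylist.index(key)] = value
+    #             else:
+    #                 self.keylist.append(key)
+    #                 self.valuelist.append(value)
+    #         def __delitem__(self, key):
+    #             try:
+    #                 i = self.keylist.index(key)
+    #             except ValueError:
+    #                 raise KeyError(key)
+    #             del self.keylist[i]
+    #             del self.valuelist[i]
+    #         def keys(self):
+    #             return list(self.keylist)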
+
+ # second level definitions support higher levels
+ def __iter__(self):
+ for k in self.keys():
+ yield k
+ def has_key(self, key):
+ try:
+ value = self[key]
+ except KeyError:
+ return False
+ return True
+ def __contains__(self, key):
+ return self.has_key(key)
+
+ # third level takes advantage of second level definitions
+ def iteritems(self):
+ for k in self:
+ yield (k, self[k])
+ def iterkeys(self):
+ return self.__iter__()
+
+ # fourth level uses definitions from lower levels
+ def itervalues(self):
+ for _, v in self.iteritems():
+ yield v
+ def values(self):
+ return [v for _, v in self.iteritems()]
+ def items(self):
+ return list(self.iteritems())
+ def clear(self):
+ for key in self.keys():
+ del self[key]
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError, "pop expected at most 2 arguments, got "\
+ + repr(1 + len(args))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+ def popitem(self):
+ try:
+ k, v = self.iteritems().next()
+ except StopIteration:
+ raise KeyError, 'container is empty'
+ del self[k]
+ return (k, v)
+ def update(self, other=None, **kwargs):
+ # Make progressively weaker assumptions about "other"
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
+ for k, v in other.iteritems():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+ def __repr__(self):
+ return repr(dict(self.iteritems()))
+ def __cmp__(self, other):
+ if other is None:
+ return 1
+ if isinstance(other, DictMixin):
+ other = dict(other.iteritems())
+ return cmp(dict(self.iteritems()), other)
+ def __len__(self):
+ return len(self.keys())
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/UserList.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,85 @@
+"""A more or less complete user-defined wrapper around list objects."""
+
+class UserList:
+ def __init__(self, initlist=None):
+ self.data = []
+ if initlist is not None:
+ # XXX should this accept an arbitrary sequence?
+ if type(initlist) == type(self.data):
+ self.data[:] = initlist
+ elif isinstance(initlist, UserList):
+ self.data[:] = initlist.data[:]
+ else:
+ self.data = list(initlist)
+ def __repr__(self): return repr(self.data)
+ def __lt__(self, other): return self.data < self.__cast(other)
+ def __le__(self, other): return self.data <= self.__cast(other)
+ def __eq__(self, other): return self.data == self.__cast(other)
+ def __ne__(self, other): return self.data != self.__cast(other)
+ def __gt__(self, other): return self.data > self.__cast(other)
+ def __ge__(self, other): return self.data >= self.__cast(other)
+ def __cast(self, other):
+ if isinstance(other, UserList): return other.data
+ else: return other
+ def __cmp__(self, other):
+ return cmp(self.data, self.__cast(other))
+ def __contains__(self, item): return item in self.data
+ def __len__(self): return len(self.data)
+ def __getitem__(self, i): return self.data[i]
+ def __setitem__(self, i, item): self.data[i] = item
+ def __delitem__(self, i): del self.data[i]
+ def __getslice__(self, i, j):
+ i = max(i, 0); j = max(j, 0)
+ return self.__class__(self.data[i:j])
+ def __setslice__(self, i, j, other):
+ i = max(i, 0); j = max(j, 0)
+ if isinstance(other, UserList):
+ self.data[i:j] = other.data
+ elif isinstance(other, type(self.data)):
+ self.data[i:j] = other
+ else:
+ self.data[i:j] = list(other)
+ def __delslice__(self, i, j):
+ i = max(i, 0); j = max(j, 0)
+ del self.data[i:j]
+ def __add__(self, other):
+ if isinstance(other, UserList):
+ return self.__class__(self.data + other.data)
+ elif isinstance(other, type(self.data)):
+ return self.__class__(self.data + other)
+ else:
+ return self.__class__(self.data + list(other))
+ def __radd__(self, other):
+ if isinstance(other, UserList):
+ return self.__class__(other.data + self.data)
+ elif isinstance(other, type(self.data)):
+ return self.__class__(other + self.data)
+ else:
+ return self.__class__(list(other) + self.data)
+ def __iadd__(self, other):
+ if isinstance(other, UserList):
+ self.data += other.data
+ elif isinstance(other, type(self.data)):
+ self.data += other
+ else:
+ self.data += list(other)
+ return self
+ def __mul__(self, n):
+ return self.__class__(self.data*n)
+ __rmul__ = __mul__
+ def __imul__(self, n):
+ self.data *= n
+ return self
+ def append(self, item): self.data.append(item)
+ def insert(self, i, item): self.data.insert(i, item)
+ def pop(self, i=-1): return self.data.pop(i)
+ def remove(self, item): self.data.remove(item)
+ def count(self, item): return self.data.count(item)
+ def index(self, item, *args): return self.data.index(item, *args)
+ def reverse(self): self.data.reverse()
+ def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
+ def extend(self, other):
+ if isinstance(other, UserList):
+ self.data.extend(other.data)
+ else:
+ self.data.extend(other)
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/UserString.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+## vim:ts=4:et:nowrap
+"""A user-defined wrapper around string objects
+
+Note: string objects have grown methods in Python 1.6.
+This module requires Python 1.6 or later.
+"""
+import sys
+
+__all__ = ["UserString","MutableString"]
+
+class UserString:
+ def __init__(self, seq):
+ if isinstance(seq, basestring):
+ self.data = seq
+ elif isinstance(seq, UserString):
+ self.data = seq.data[:]
+ else:
+ self.data = str(seq)
+ def __str__(self): return str(self.data)
+ def __repr__(self): return repr(self.data)
+ def __int__(self): return int(self.data)
+ def __long__(self): return long(self.data)
+ def __float__(self): return float(self.data)
+ def __complex__(self): return complex(self.data)
+ def __hash__(self): return hash(self.data)
+
+ def __cmp__(self, string):
+ if isinstance(string, UserString):
+ return cmp(self.data, string.data)
+ else:
+ return cmp(self.data, string)
+ def __contains__(self, char):
+ return char in self.data
+
+ def __len__(self): return len(self.data)
+ def __getitem__(self, index): return self.__class__(self.data[index])
+ def __getslice__(self, start, end):
+ start = max(start, 0); end = max(end, 0)
+ return self.__class__(self.data[start:end])
+
+ def __add__(self, other):
+ if isinstance(other, UserString):
+ return self.__class__(self.data + other.data)
+ elif isinstance(other, basestring):
+ return self.__class__(self.data + other)
+ else:
+ return self.__class__(self.data + str(other))
+ def __radd__(self, other):
+ if isinstance(other, basestring):
+ return self.__class__(other + self.data)
+ else:
+ return self.__class__(str(other) + self.data)
+ def __mul__(self, n):
+ return self.__class__(self.data*n)
+ __rmul__ = __mul__
+ def __mod__(self, args):
+ return self.__class__(self.data % args)
+
+ # the following methods are defined in alphabetical order:
+ def capitalize(self): return self.__class__(self.data.capitalize())
+ def center(self, width, *args):
+ return self.__class__(self.data.center(width, *args))
+ def count(self, sub, start=0, end=sys.maxint):
+ return self.data.count(sub, start, end)
+ def decode(self, encoding=None, errors=None): # XXX improve this?
+ if encoding:
+ if errors:
+ return self.__class__(self.data.decode(encoding, errors))
+ else:
+ return self.__class__(self.data.decode(encoding))
+ else:
+ return self.__class__(self.data.decode())
+ def encode(self, encoding=None, errors=None): # XXX improve this?
+ if encoding:
+ if errors:
+ return self.__class__(self.data.encode(encoding, errors))
+ else:
+ return self.__class__(self.data.encode(encoding))
+ else:
+ return self.__class__(self.data.encode())
+ def endswith(self, suffix, start=0, end=sys.maxint):
+ return self.data.endswith(suffix, start, end)
+ def expandtabs(self, tabsize=8):
+ return self.__class__(self.data.expandtabs(tabsize))
+ def find(self, sub, start=0, end=sys.maxint):
+ return self.data.find(sub, start, end)
+ def index(self, sub, start=0, end=sys.maxint):
+ return self.data.index(sub, start, end)
+ def isalpha(self): return self.data.isalpha()
+ def isalnum(self): return self.data.isalnum()
+ def isdecimal(self): return self.data.isdecimal()
+ def isdigit(self): return self.data.isdigit()
+ def islower(self): return self.data.islower()
+ def isnumeric(self): return self.data.isnumeric()
+ def isspace(self): return self.data.isspace()
+ def istitle(self): return self.data.istitle()
+ def isupper(self): return self.data.isupper()
+ def join(self, seq): return self.data.join(seq)
+ def ljust(self, width, *args):
+ return self.__class__(self.data.ljust(width, *args))
+ def lower(self): return self.__class__(self.data.lower())
+ def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
+ def partition(self, sep):
+ return self.data.partition(sep)
+ def replace(self, old, new, maxsplit=-1):
+ return self.__class__(self.data.replace(old, new, maxsplit))
+ def rfind(self, sub, start=0, end=sys.maxint):
+ return self.data.rfind(sub, start, end)
+ def rindex(self, sub, start=0, end=sys.maxint):
+ return self.data.rindex(sub, start, end)
+ def rjust(self, width, *args):
+ return self.__class__(self.data.rjust(width, *args))
+ def rpartition(self, sep):
+ return self.data.rpartition(sep)
+ def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
+ def split(self, sep=None, maxsplit=-1):
+ return self.data.split(sep, maxsplit)
+ def rsplit(self, sep=None, maxsplit=-1):
+ return self.data.rsplit(sep, maxsplit)
+ def splitlines(self, keepends=0): return self.data.splitlines(keepends)
+ def startswith(self, prefix, start=0, end=sys.maxint):
+ return self.data.startswith(prefix, start, end)
+ def strip(self, chars=None): return self.__class__(self.data.strip(chars))
+ def swapcase(self): return self.__class__(self.data.swapcase())
+ def title(self): return self.__class__(self.data.title())
+ def translate(self, *args):
+ return self.__class__(self.data.translate(*args))
+ def upper(self): return self.__class__(self.data.upper())
+ def zfill(self, width): return self.__class__(self.data.zfill(width))
+
+class MutableString(UserString):
+ """mutable string objects
+
+ Python strings are immutable objects. This has the advantage that
+ strings may be used as dictionary keys. If this property isn't needed
+ and you insist on changing string values in place instead, you may cheat
+ and use MutableString.
+
+ But the purpose of this class is an educational one: to prevent
+ people from inventing their own mutable string class derived
+ from UserString and then forgetting to remove (override) the
+ __hash__ method inherited from UserString. This would lead to
+ errors that would be very hard to track down.
+
+ A faster and better solution is to rewrite your program using lists."""
+ def __init__(self, string=""):
+ self.data = string
+ def __hash__(self):
+ raise TypeError, "unhashable type (it is mutable)"
+ def __setitem__(self, index, sub):
+ if index < 0:
+ index += len(self.data)
+ if index < 0 or index >= len(self.data): raise IndexError
+ self.data = self.data[:index] + sub + self.data[index+1:]
+ def __delitem__(self, index):
+ if index < 0:
+ index += len(self.data)
+ if index < 0 or index >= len(self.data): raise IndexError
+ self.data = self.data[:index] + self.data[index+1:]
+ def __setslice__(self, start, end, sub):
+ start = max(start, 0); end = max(end, 0)
+ if isinstance(sub, UserString):
+ self.data = self.data[:start]+sub.data+self.data[end:]
+ elif isinstance(sub, basestring):
+ self.data = self.data[:start]+sub+self.data[end:]
+ else:
+ self.data = self.data[:start]+str(sub)+self.data[end:]
+ def __delslice__(self, start, end):
+ start = max(start, 0); end = max(end, 0)
+ self.data = self.data[:start] + self.data[end:]
+ def immutable(self):
+ return UserString(self.data)
+ def __iadd__(self, other):
+ if isinstance(other, UserString):
+ self.data += other.data
+ elif isinstance(other, basestring):
+ self.data += other
+ else:
+ self.data += str(other)
+ return self
+ def __imul__(self, n):
+ self.data *= n
+ return self
+
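+# Minimal usage sketch (an editorial addition, not part of the original
+# module): MutableString permits in-place edits that plain strings forbid,
+# while refusing to be hashed.
+#
+# s = MutableString("hello")
+# s[0] = "H" # s is now "Hello"
+# s += ", world" # __iadd__ mutates in place and returns self
+# hash(s) # raises TypeError: unhashable type
+# s.immutable() # a hashable UserString snapshot of the data
+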
+if __name__ == "__main__":
+ # execute the regression test to stdout, if called as a script:
+ import os
+ called_in_dir, called_as = os.path.split(sys.argv[0])
+ called_as, py = os.path.splitext(called_as)
+ if '-q' in sys.argv:
+ from test import test_support
+ test_support.verbose = 0
+ __import__('test.test_' + called_as.lower())
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/_LWPCookieJar.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,170 @@
+"""Load / save to libwww-perl (LWP) format files.
+
+Actually, the format is slightly extended from that used by LWP's
+(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
+not recorded by LWP.
+
+It uses the version string "2.0", though really there isn't an LWP Cookies
+2.0 format. This indicates that there is extra information in here
+(domain_dot and port_spec) while still being compatible with
+libwww-perl, I hope.
+
+"""
+
+import time, re
+from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
+ Cookie, MISSING_FILENAME_TEXT,
+ join_header_words, split_header_words,
+ iso2time, time2isoz)
+
+def lwp_cookie_str(cookie):
+ """Return string representation of Cookie in an the LWP cookie file format.
+
+ Actually, the format is extended a bit -- see module docstring.
+
+ """
+ h = [(cookie.name, cookie.value),
+ ("path", cookie.path),
+ ("domain", cookie.domain)]
+ if cookie.port is not None: h.append(("port", cookie.port))
+ if cookie.path_specified: h.append(("path_spec", None))
+ if cookie.port_specified: h.append(("port_spec", None))
+ if cookie.domain_initial_dot: h.append(("domain_dot", None))
+ if cookie.secure: h.append(("secure", None))
+ if cookie.expires: h.append(("expires",
+ time2isoz(float(cookie.expires))))
+ if cookie.discard: h.append(("discard", None))
+ if cookie.comment: h.append(("comment", cookie.comment))
+ if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
+
+ keys = cookie._rest.keys()
+ keys.sort()
+ for k in keys:
+ h.append((k, str(cookie._rest[k])))
+
+ h.append(("version", str(cookie.version)))
+
+ return join_header_words([h])
+
+class LWPCookieJar(FileCookieJar):
+ """
+ The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
+ "Set-Cookie3" is the format used by the libwww-perl library, not known
+ to be compatible with any browser, but which is easy to read and
+ doesn't lose information about RFC 2965 cookies.
+
+ Additional methods
+
+ as_lwp_str(ignore_discard=True, ignore_expires=True)
+
+ """
+
+ def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
+ """Return cookies as a string of "\n"-separated "Set-Cookie3" headers.
+
+ ignore_discard and ignore_expires: see docstring for FileCookieJar.save
+
+ """
+ now = time.time()
+ r = []
+ for cookie in self:
+ if not ignore_discard and cookie.discard:
+ continue
+ if not ignore_expires and cookie.is_expired(now):
+ continue
+ r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
+ return "\n".join(r+[""])
+
+ def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+ if filename is None:
+ if self.filename is not None: filename = self.filename
+ else: raise ValueError(MISSING_FILENAME_TEXT)
+
+ f = open(filename, "w")
+ try:
+ # There really isn't an LWP Cookies 2.0 format, but this indicates
+ # that there is extra information in here (domain_dot and
+ # port_spec) while still being compatible with libwww-perl, I hope.
+ f.write("#LWP-Cookies-2.0\n")
+ f.write(self.as_lwp_str(ignore_discard, ignore_expires))
+ finally:
+ f.close()
+
+ def _really_load(self, f, filename, ignore_discard, ignore_expires):
+ magic = f.readline()
+ if not re.search(self.magic_re, magic):
+ msg = ("%r does not look like a Set-Cookie3 (LWP) format "
+ "file" % filename)
+ raise LoadError(msg)
+
+ now = time.time()
+
+ header = "Set-Cookie3:"
+ boolean_attrs = ("port_spec", "path_spec", "domain_dot",
+ "secure", "discard")
+ value_attrs = ("version",
+ "port", "path", "domain",
+ "expires",
+ "comment", "commenturl")
+
+ try:
+ while 1:
+ line = f.readline()
+ if line == "": break
+ if not line.startswith(header):
+ continue
+ line = line[len(header):].strip()
+
+ for data in split_header_words([line]):
+ name, value = data[0]
+ standard = {}
+ rest = {}
+ for k in boolean_attrs:
+ standard[k] = False
+ for k, v in data[1:]:
+ if k is not None:
+ lc = k.lower()
+ else:
+ lc = None
+ # don't lose case distinction for unknown fields
+ if (lc in value_attrs) or (lc in boolean_attrs):
+ k = lc
+ if k in boolean_attrs:
+ if v is None: v = True
+ standard[k] = v
+ elif k in value_attrs:
+ standard[k] = v
+ else:
+ rest[k] = v
+
+ h = standard.get
+ expires = h("expires")
+ discard = h("discard")
+ if expires is not None:
+ expires = iso2time(expires)
+ if expires is None:
+ discard = True
+ domain = h("domain")
+ domain_specified = domain.startswith(".")
+ c = Cookie(h("version"), name, value,
+ h("port"), h("port_spec"),
+ domain, domain_specified, h("domain_dot"),
+ h("path"), h("path_spec"),
+ h("secure"),
+ expires,
+ discard,
+ h("comment"),
+ h("commenturl"),
+ rest)
+ if not ignore_discard and c.discard:
+ continue
+ if not ignore_expires and c.is_expired(now):
+ continue
+ self.set_cookie(c)
+
+ except IOError:
+ raise
+ except Exception:
+ _warn_unhandled_exception()
+ raise LoadError("invalid Set-Cookie3 format file %r: %r" %
+ (filename, line))
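+
+# Illustrative sketch (an editorial addition, not part of the original
+# module): a typical round trip through urllib2, assuming the process may
+# write "cookies.lwp" in the current directory.
+#
+# import urllib2
+# jar = LWPCookieJar()
+# opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
+# opener.open("http://example.com/")
+# jar.save("cookies.lwp", ignore_discard=True)
+# jar.load("cookies.lwp") # reload later into the same jar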
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/_MozillaCookieJar.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,149 @@
+"""Mozilla / Netscape cookie loading / saving."""
+
+import re, time
+
+from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
+ Cookie, MISSING_FILENAME_TEXT)
+
+class MozillaCookieJar(FileCookieJar):
+ """
+
+ WARNING: you may want to backup your browser's cookies file if you use
+ this class to save cookies. I *think* it works, but there have been
+ bugs in the past!
+
+ This class differs from CookieJar only in the format it uses to save and
+ load cookies to and from a file. This class uses the Mozilla/Netscape
+ `cookies.txt' format. lynx uses this file format, too.
+
+ Don't expect cookies saved while the browser is running to be noticed by
+ the browser (in fact, Mozilla on unix will overwrite your saved cookies if
+ you change them on disk while it's running; on Windows, you probably can't
+ save at all while the browser is running).
+
+ Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
+ Netscape cookies on saving.
+
+ In particular, the cookie version and port number information is lost,
+ together with information about whether or not Path, Port and Discard were
+ specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
+ domain as set in the HTTP header started with a dot (yes, I'm aware some
+ domains in Netscape files start with a dot and some don't -- trust me, you
+ really don't want to know any more about this).
+
+ Note that though Mozilla and Netscape use the same format, they use
+ slightly different headers. The class saves cookies using the Netscape
+ header by default (Mozilla can cope with that).
+
+ """
+ magic_re = "#( Netscape)? HTTP Cookie File"
+ header = """\
+ # Netscape HTTP Cookie File
+ # http://www.netscape.com/newsref/std/cookie_spec.html
+ # This is a generated file! Do not edit.
+
+"""
+
+ def _really_load(self, f, filename, ignore_discard, ignore_expires):
+ now = time.time()
+
+ magic = f.readline()
+ if not re.search(self.magic_re, magic):
+ f.close()
+ raise LoadError(
+ "%r does not look like a Netscape format cookies file" %
+ filename)
+
+ try:
+ while 1:
+ line = f.readline()
+ if line == "": break
+
+ # last field may be absent, so keep any trailing tab
+ if line.endswith("\n"): line = line[:-1]
+
+ # skip comments and blank lines XXX what is $ for?
+ if (line.strip().startswith(("#", "$")) or
+ line.strip() == ""):
+ continue
+
+ domain, domain_specified, path, secure, expires, name, value = \
+ line.split("\t")
+ secure = (secure == "TRUE")
+ domain_specified = (domain_specified == "TRUE")
+ if name == "":
+ # cookies.txt regards 'Set-Cookie: foo' as a cookie
+ # with no name, whereas cookielib regards it as a
+ # cookie with no value.
+ name = value
+ value = None
+
+ initial_dot = domain.startswith(".")
+ assert domain_specified == initial_dot
+
+ discard = False
+ if expires == "":
+ expires = None
+ discard = True
+
+ # assume path_specified is false
+ c = Cookie(0, name, value,
+ None, False,
+ domain, domain_specified, initial_dot,
+ path, False,
+ secure,
+ expires,
+ discard,
+ None,
+ None,
+ {})
+ if not ignore_discard and c.discard:
+ continue
+ if not ignore_expires and c.is_expired(now):
+ continue
+ self.set_cookie(c)
+
+ except IOError:
+ raise
+ except Exception:
+ _warn_unhandled_exception()
+ raise LoadError("invalid Netscape format cookies file %r: %r" %
+ (filename, line))
+
+ def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+ if filename is None:
+ if self.filename is not None: filename = self.filename
+ else: raise ValueError(MISSING_FILENAME_TEXT)
+
+ f = open(filename, "w")
+ try:
+ f.write(self.header)
+ now = time.time()
+ for cookie in self:
+ if not ignore_discard and cookie.discard:
+ continue
+ if not ignore_expires and cookie.is_expired(now):
+ continue
+ if cookie.secure: secure = "TRUE"
+ else: secure = "FALSE"
+ if cookie.domain.startswith("."): initial_dot = "TRUE"
+ else: initial_dot = "FALSE"
+ if cookie.expires is not None:
+ expires = str(cookie.expires)
+ else:
+ expires = ""
+ if cookie.value is None:
+ # cookies.txt regards 'Set-Cookie: foo' as a cookie
+ # with no name, whereas cookielib regards it as a
+ # cookie with no value.
+ name = ""
+ value = cookie.name
+ else:
+ name = cookie.name
+ value = cookie.value
+ f.write(
+ "\t".join([cookie.domain, initial_dot, cookie.path,
+ secure, expires, name, value])+
+ "\n")
+ finally:
+ f.close()
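+
+# Illustrative sketch (an editorial addition, not part of the original
+# module): load a browser's Netscape-format cookie file, assuming one
+# exists at the given path.
+#
+# jar = MozillaCookieJar("cookies.txt")
+# jar.load(ignore_discard=True, ignore_expires=True)
+# for cookie in jar: print cookie.domain, cookie.name, cookie.value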
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/__future__.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,116 @@
+"""Record of phased-in incompatible language changes.
+
+Each line is of the form:
+
+ FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
+ CompilerFlag ")"
+
+where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
+of the same form as sys.version_info:
+
+ (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
+ PY_MINOR_VERSION, # the 1; an int
+ PY_MICRO_VERSION, # the 0; an int
+ PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
+ PY_RELEASE_SERIAL # the 3; an int
+ )
+
+OptionalRelease records the first release in which
+
+ from __future__ import FeatureName
+
+was accepted.
+
+In the case of MandatoryReleases that have not yet occurred,
+MandatoryRelease predicts the release in which the feature will become part
+of the language.
+
+Else MandatoryRelease records when the feature became part of the language;
+in releases at or after that, modules no longer need
+
+ from __future__ import FeatureName
+
+to use the feature in question, but may continue to use such imports.
+
+MandatoryRelease may also be None, meaning that a planned feature got
+dropped.
+
+Instances of class _Feature have two corresponding methods,
+.getOptionalRelease() and .getMandatoryRelease().
+
+CompilerFlag is the (bitfield) flag that should be passed in the fourth
+argument to the builtin function compile() to enable the feature in
+dynamically compiled code. This flag is stored in the .compiler_flag
+attribute on _Feature instances. These values must match the appropriate
+#defines of CO_xxx flags in Include/compile.h.
+
+No feature line is ever to be deleted from this file.
+"""
+
+all_feature_names = [
+ "nested_scopes",
+ "generators",
+ "division",
+ "absolute_import",
+ "with_statement",
+]
+
+__all__ = ["all_feature_names"] + all_feature_names
+
+# The CO_xxx symbols are defined here under the same names used by
+# compile.h, so that an editor search will find them here. However,
+# they're not exported in __all__, because they don't really belong to
+# this module.
+CO_NESTED = 0x0010 # nested_scopes
+CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
+CO_FUTURE_DIVISION = 0x2000 # division
+CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
+CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
+
+class _Feature:
+ def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
+ self.optional = optionalRelease
+ self.mandatory = mandatoryRelease
+ self.compiler_flag = compiler_flag
+
+ def getOptionalRelease(self):
+ """Return first release in which this feature was recognized.
+
+ This is a 5-tuple, of the same form as sys.version_info.
+ """
+
+ return self.optional
+
+ def getMandatoryRelease(self):
+ """Return release in which this feature will become mandatory.
+
+ This is a 5-tuple, of the same form as sys.version_info, or, if
+ the feature was dropped, is None.
+ """
+
+ return self.mandatory
+
+ def __repr__(self):
+ return "_Feature" + repr((self.optional,
+ self.mandatory,
+ self.compiler_flag))
+
+nested_scopes = _Feature((2, 1, 0, "beta", 1),
+ (2, 2, 0, "alpha", 0),
+ CO_NESTED)
+
+generators = _Feature((2, 2, 0, "alpha", 1),
+ (2, 3, 0, "final", 0),
+ CO_GENERATOR_ALLOWED)
+
+division = _Feature((2, 2, 0, "alpha", 2),
+ (3, 0, 0, "alpha", 0),
+ CO_FUTURE_DIVISION)
+
+absolute_import = _Feature((2, 5, 0, "alpha", 1),
+ (2, 7, 0, "alpha", 0),
+ CO_FUTURE_ABSOLUTE_IMPORT)
+
+with_statement = _Feature((2, 5, 0, "alpha", 1),
+ (2, 6, 0, "alpha", 0),
+ CO_FUTURE_WITH_STATEMENT)
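+
+# Illustrative sketch (an editorial addition, not part of the original
+# file): inspect a feature's schedule and pass its compiler flag to
+# compile() to enable the feature in dynamically compiled code.
+#
+# import __future__
+# f = __future__.division
+# f.getOptionalRelease() # (2, 2, 0, 'alpha', 2)
+# f.getMandatoryRelease() # (3, 0, 0, 'alpha', 0)
+# code = compile("x = 1 / 2", "<example>", "exec", f.compiler_flag)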
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/__phello__.foo.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1 @@
+# This file exists as a helper for the test.test_frozen module.
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/_strptime.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,452 @@
+"""Strptime-related classes and functions.
+
+CLASSES:
+ LocaleTime -- Discovers and stores locale-specific time information
+ TimeRE -- Creates regexes for pattern matching a string of text containing
+ time information
+
+FUNCTIONS:
+ _getlang -- Figure out what language is being used for the locale
+ strptime -- Calculates the time struct represented by the passed-in string
+
+"""
+import time
+import locale
+import calendar
+from re import compile as re_compile
+from re import IGNORECASE
+from re import escape as re_escape
+from datetime import date as datetime_date
+try:
+ from thread import allocate_lock as _thread_allocate_lock
+except ImportError:
+ from dummy_thread import allocate_lock as _thread_allocate_lock
+
+__author__ = "Brett Cannon"
+__email__ = "brett@python.org"
+
+__all__ = ['strptime']
+
+def _getlang():
+ # Figure out what the current language is set to.
+ return locale.getlocale(locale.LC_TIME)
+
+class LocaleTime(object):
+ """Stores and handles locale-specific information related to time.
+
+ ATTRIBUTES:
+ f_weekday -- full weekday names (7-item list)
+ a_weekday -- abbreviated weekday names (7-item list)
+ f_month -- full month names (13-item list; dummy value in [0], which
+ is added by code)
+ a_month -- abbreviated month names (13-item list, dummy value in
+ [0], which is added by code)
+ am_pm -- AM/PM representation (2-item list)
+ LC_date_time -- format string for date/time representation (string)
+ LC_date -- format string for date representation (string)
+ LC_time -- format string for time representation (string)
+ timezone -- daylight- and non-daylight-savings timezone representation
+ (2-item tuple of frozensets)
+ lang -- Language used by instance (2-item tuple)
+ """
+
+ def __init__(self):
+ """Set all attributes.
+
+ Order of methods called matters for dependency reasons.
+
+ The locale language is set at the onset and then checked again before
+ exiting. This is to make sure that the attributes were not set with a
+ mix of information from more than one locale. This would most likely
+ happen when using threads where one thread calls a locale-dependent
+ function while another thread changes the locale while the function in
+ the other thread is still running. Proper coding would call for
+ locks to prevent changing the locale while locale-dependent code is
+ running. The check here is done in case someone does not think about
+ doing this.
+
+ The only other possible issue is if someone changed the timezone and did
+ not call time.tzset(). That is an issue for the programmer, though,
+ since changing the timezone is worthless without that call.
+
+ """
+ self.lang = _getlang()
+ self.__calc_weekday()
+ self.__calc_month()
+ self.__calc_am_pm()
+ self.__calc_timezone()
+ self.__calc_date_time()
+ if _getlang() != self.lang:
+ raise ValueError("locale changed during initialization")
+
+ def __pad(self, seq, front):
+ # Add '' to the front of seq if front is True, else to the back.
+ seq = list(seq)
+ if front:
+ seq.insert(0, '')
+ else:
+ seq.append('')
+ return seq
+
+ def __calc_weekday(self):
+ # Set self.a_weekday and self.f_weekday using the calendar
+ # module.
+ a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
+ f_weekday = [calendar.day_name[i].lower() for i in range(7)]
+ self.a_weekday = a_weekday
+ self.f_weekday = f_weekday
+
+ def __calc_month(self):
+ # Set self.f_month and self.a_month using the calendar module.
+ a_month = [calendar.month_abbr[i].lower() for i in range(13)]
+ f_month = [calendar.month_name[i].lower() for i in range(13)]
+ self.a_month = a_month
+ self.f_month = f_month
+
+ def __calc_am_pm(self):
+ # Set self.am_pm by using time.strftime().
+
+ # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
+ # magical; it just happens to be used everywhere else that a
+ # static date is needed.
+ am_pm = []
+ for hour in (01,22):
+ time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
+ am_pm.append(time.strftime("%p", time_tuple).lower())
+ self.am_pm = am_pm
+
+ def __calc_date_time(self):
+ # Set self.date_time, self.date, & self.time by using
+ # time.strftime().
+
+ # Use (1999,3,17,22,44,55,2,76,0) as the magic date because the number
+ # of overloaded values is minimized. The order in which we search for
+ # values within the format string is very important; it eliminates
+ # possible ambiguity about what something represents.
+ time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
+ date_time = [None, None, None]
+ date_time[0] = time.strftime("%c", time_tuple).lower()
+ date_time[1] = time.strftime("%x", time_tuple).lower()
+ date_time[2] = time.strftime("%X", time_tuple).lower()
+ replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
+ (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
+ (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
+ ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
+ ('44', '%M'), ('55', '%S'), ('76', '%j'),
+ ('17', '%d'), ('03', '%m'), ('3', '%m'),
+ # '3' needed for when no leading zero.
+ ('2', '%w'), ('10', '%I')]
+ replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
+ for tz in tz_values])
+ for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
+ current_format = date_time[offset]
+ for old, new in replacement_pairs:
+ # Must deal with possible lack of locale info
+ # manifesting itself as the empty string (e.g., Swedish's
+ # lack of AM/PM info) or a platform returning a tuple of empty
+ # strings (e.g., MacOS 9 having timezone as ('','')).
+ if old:
+ current_format = current_format.replace(old, new)
+ # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
+ # 2005-01-03 occurs before the first Monday of the year. Otherwise
+ # %U is used.
+ time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
+ if '00' in time.strftime(directive, time_tuple):
+ U_W = '%W'
+ else:
+ U_W = '%U'
+ date_time[offset] = current_format.replace('11', U_W)
+ self.LC_date_time = date_time[0]
+ self.LC_date = date_time[1]
+ self.LC_time = date_time[2]
+
+ def __calc_timezone(self):
+ # Set self.timezone by using time.tzname.
+ # Do not worry about the possibility of time.tzname[0] == time.tzname[1]
+ # and time.daylight; handle that in strptime().
+ try:
+ time.tzset()
+ except AttributeError:
+ pass
+ no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
+ if time.daylight:
+ has_saving = frozenset([time.tzname[1].lower()])
+ else:
+ has_saving = frozenset()
+ self.timezone = (no_saving, has_saving)
+
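+# Illustrative sketch (an editorial addition): LocaleTime exposes the names
+# and format strings of the active locale; the values below assume the C
+# locale.
+#
+# lt = LocaleTime()
+# lt.a_weekday[0] # 'mon'
+# lt.f_month[3] # 'march'
+# lt.LC_date # '%m/%d/%y'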
+
+class TimeRE(dict):
+ """Handle conversion from format directives to regexes."""
+
+ def __init__(self, locale_time=None):
+ """Create keys/values.
+
+ Order of execution is important for dependency reasons.
+
+ """
+ if locale_time:
+ self.locale_time = locale_time
+ else:
+ self.locale_time = LocaleTime()
+ base = super(TimeRE, self)
+ base.__init__({
+ # The " \d" part of the regex is to make %c from ANSI C work
+ 'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
+ 'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
+ 'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
+ 'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
+ 'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
+ 'M': r"(?P<M>[0-5]\d|\d)",
+ 'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
+ 'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
+ 'w': r"(?P<w>[0-6])",
+ # W is set below by using 'U'
+ 'y': r"(?P<y>\d\d)",
+ #XXX: Does 'Y' need to worry about having less or more than
+ # 4 digits?
+ 'Y': r"(?P<Y>\d\d\d\d)",
+ 'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
+ 'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
+ 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
+ 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
+ 'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
+ 'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
+ for tz in tz_names),
+ 'Z'),
+ '%': '%'})
+ base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
+ base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
+ base.__setitem__('x', self.pattern(self.locale_time.LC_date))
+ base.__setitem__('X', self.pattern(self.locale_time.LC_time))
+
+ def __seqToRE(self, to_convert, directive):
+ """Convert a list to a regex string for matching a directive.
+
+ Want possible matching values to be from longest to shortest. This
+ prevents the possibility of a match occuring for a value that also
+ a substring of a larger value that should have matched (e.g., 'abc'
+ matching when 'abcdef' should have been the match).
+
+ """
+ to_convert = sorted(to_convert, key=len, reverse=True)
+ for value in to_convert:
+ if value != '':
+ break
+ else:
+ return ''
+ regex = '|'.join(re_escape(stuff) for stuff in to_convert)
+ regex = '(?P<%s>%s' % (directive, regex)
+ return '%s)' % regex
+
+ def pattern(self, format):
+ """Return regex pattern for the format string.
+
+ Need to make sure that any characters that might be interpreted as
+ regex syntax are escaped.
+
+ """
+ processed_format = ''
+ # The sub() call escapes all characters that might be misconstrued
+ # as regex syntax. Cannot use re.escape since we have to deal with
+ # format directives (%m, etc.).
+ regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
+ format = regex_chars.sub(r"\\\1", format)
+ whitespace_replacement = re_compile('\s+')
+ format = whitespace_replacement.sub('\s*', format)
+ while '%' in format:
+ directive_index = format.index('%')+1
+ processed_format = "%s%s%s" % (processed_format,
+ format[:directive_index-1],
+ self[format[directive_index]])
+ format = format[directive_index+1:]
+ return "%s%s" % (processed_format, format)
+
+ def compile(self, format):
+ """Return a compiled re object for the format string."""
+ return re_compile(self.pattern(format), IGNORECASE)
+
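+# Illustrative sketch (an editorial addition): TimeRE turns a strftime-style
+# format into a regex with one named group per directive.
+#
+# _t = TimeRE()
+# _t.pattern("%Y-%m-%d") # regex with named groups Y, m and d
+# _t.compile("%Y-%m-%d").match("2021-06-08").groupdict()
+# # -> {'Y': '2021', 'm': '06', 'd': '08'}
+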
+_cache_lock = _thread_allocate_lock()
+# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
+# first!
+_TimeRE_cache = TimeRE()
+_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
+_regex_cache = {}
+
+def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
+ """Calculate the Julian day based on the year, week of the year, and day of
+ the week, with week_starts_Mon representing whether the week of the year
+ assumes the week starts on Sunday or Monday (False or True)."""
+ first_weekday = datetime_date(year, 1, 1).weekday()
+ # If we are dealing with the %U directive (week starts on Sunday), it's
+ # easier to just shift the view to Sunday being the first day of the
+ # week.
+ if not week_starts_Mon:
+ first_weekday = (first_weekday + 1) % 7
+ day_of_week = (day_of_week + 1) % 7
+ # Need to watch out for a week 0 (when the first day of the year is not
+ # the same as that specified by %U or %W).
+ week_0_length = (7 - first_weekday) % 7
+ if week_of_year == 0:
+ return 1 + day_of_week - first_weekday
+ else:
+ days_to_week = week_0_length + (7 * (week_of_year - 1))
+ return 1 + days_to_week + day_of_week
+
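+# Worked example (an editorial addition): 2005 begins on a Saturday, so %U
+# week 1 begins on Sunday, January 2nd; asking for that Sunday (day_of_week
+# 6 in Monday-based terms, week_starts_Mon False) yields Julian day 2.
+#
+# _calc_julian_from_U_or_W(2005, 1, 6, False) # -> 2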
+
+def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
+ """Return a time struct based on the input string and the format string."""
+ global _TimeRE_cache, _regex_cache
+ _cache_lock.acquire()
+ try:
+ time_re = _TimeRE_cache
+ locale_time = time_re.locale_time
+ if _getlang() != locale_time.lang:
+ _TimeRE_cache = TimeRE()
+ _regex_cache = {}
+ if len(_regex_cache) > _CACHE_MAX_SIZE:
+ _regex_cache.clear()
+ format_regex = _regex_cache.get(format)
+ if not format_regex:
+ try:
+ format_regex = time_re.compile(format)
+ # KeyError raised when a bad format is found; can be specified as
+ # \\, in which case it was a stray % but with a space after it
+ except KeyError, err:
+ bad_directive = err.args[0]
+ if bad_directive == "\\":
+ bad_directive = "%"
+ del err
+ raise ValueError("'%s' is a bad directive in format '%s'" %
+ (bad_directive, format))
+ # IndexError only occurs when the format string is "%"
+ except IndexError:
+ raise ValueError("stray %% in format '%s'" % format)
+ _regex_cache[format] = format_regex
+ finally:
+ _cache_lock.release()
+ found = format_regex.match(data_string)
+ if not found:
+ raise ValueError("time data did not match format: data=%s fmt=%s" %
+ (data_string, format))
+ if len(data_string) != found.end():
+ raise ValueError("unconverted data remains: %s" %
+ data_string[found.end():])
+ year = 1900
+ month = day = 1
+ hour = minute = second = 0
+ tz = -1
+ # Default to -1 to signify that the values are not known; not critical
+ # to have, though
+ week_of_year = -1
+ week_of_year_start = -1
+ # weekday and julian defaulted to -1 so as to signal need to calculate
+ # values
+ weekday = julian = -1
+ found_dict = found.groupdict()
+ for group_key in found_dict.iterkeys():
+ # Directives not explicitly handled below:
+ # c, x, X
+ # handled by being composed of other directives
+ # U, W
+ # worthless without day of the week
+ if group_key == 'y':
+ year = int(found_dict['y'])
+ # Open Group specification for strptime() states that a %y
+ # value in the range of [00, 68] is in the century 2000, while
+ # [69, 99] is in the century 1900
+ if year <= 68:
+ year += 2000
+ else:
+ year += 1900
+ elif group_key == 'Y':
+ year = int(found_dict['Y'])
+ elif group_key == 'm':
+ month = int(found_dict['m'])
+ elif group_key == 'B':
+ month = locale_time.f_month.index(found_dict['B'].lower())
+ elif group_key == 'b':
+ month = locale_time.a_month.index(found_dict['b'].lower())
+ elif group_key == 'd':
+ day = int(found_dict['d'])
+ elif group_key == 'H':
+ hour = int(found_dict['H'])
+ elif group_key == 'I':
+ hour = int(found_dict['I'])
+ ampm = found_dict.get('p', '').lower()
+ # If there was no AM/PM indicator, we'll treat this like AM
+ if ampm in ('', locale_time.am_pm[0]):
+ # We're in AM so the hour is correct unless we're
+ # looking at 12 midnight.
+ # 12 midnight == 12 AM == hour 0
+ if hour == 12:
+ hour = 0
+ elif ampm == locale_time.am_pm[1]:
+ # We're in PM so we need to add 12 to the hour unless
+ # we're looking at 12 noon.
+ # 12 noon == 12 PM == hour 12
+ if hour != 12:
+ hour += 12
+ elif group_key == 'M':
+ minute = int(found_dict['M'])
+ elif group_key == 'S':
+ second = int(found_dict['S'])
+ elif group_key == 'A':
+ weekday = locale_time.f_weekday.index(found_dict['A'].lower())
+ elif group_key == 'a':
+ weekday = locale_time.a_weekday.index(found_dict['a'].lower())
+ elif group_key == 'w':
+ weekday = int(found_dict['w'])
+ if weekday == 0:
+ weekday = 6
+ else:
+ weekday -= 1
+ elif group_key == 'j':
+ julian = int(found_dict['j'])
+ elif group_key in ('U', 'W'):
+ week_of_year = int(found_dict[group_key])
+ if group_key == 'U':
+ # U starts week on Sunday.
+ week_of_year_start = 6
+ else:
+ # W starts week on Monday.
+ week_of_year_start = 0
+ elif group_key == 'Z':
+ # Since -1 is default value only need to worry about setting tz if
+ # it can be something other than -1.
+ found_zone = found_dict['Z'].lower()
+ for value, tz_values in enumerate(locale_time.timezone):
+ if found_zone in tz_values:
+ # Deal with bad locale setup where timezone names are the
+ # same and yet time.daylight is true; too ambiguous to
+ # be able to tell what timezone has daylight savings
+ if (time.tzname[0] == time.tzname[1] and
+ time.daylight and found_zone not in ("utc", "gmt")):
+ break
+ else:
+ tz = value
+ break
+ # If we know the week of the year and what day of that week, we can figure
+ # out the Julian day of the year.
+ if julian == -1 and week_of_year != -1 and weekday != -1:
+ week_starts_Mon = True if week_of_year_start == 0 else False
+ julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
+ week_starts_Mon)
+ # Cannot pre-calculate datetime_date() since can change in Julian
+ # calculation and thus could have different value for the day of the week
+ # calculation.
+ if julian == -1:
+ # Need to add 1 to result since first day of the year is 1, not 0.
+ julian = datetime_date(year, month, day).toordinal() - \
+ datetime_date(year, 1, 1).toordinal() + 1
+ else: # Assume that if they bothered to include Julian day it will
+ # be accurate.
+ datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
+ year = datetime_result.year
+ month = datetime_result.month
+ day = datetime_result.day
+ if weekday == -1:
+ weekday = datetime_date(year, month, day).weekday()
+ return time.struct_time((year, month, day,
+ hour, minute, second,
+ weekday, julian, tz))
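+
+# Illustrative sketch (an editorial addition): parse a timestamp with the
+# default format and read fields off the resulting struct.
+#
+# t = strptime("Tue Jun 8 19:10:51 2021")
+# (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour) # (2021, 6, 8, 19)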
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/_threading_local.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,241 @@
+"""Thread-local objects.
+
+(Note that this module provides a Python version of the threading.local
+ class. Depending on the version of Python you're using, there may be a
+ faster one available. You should always import the `local` class from
+ `threading`.)
+
+Thread-local objects support the management of thread-local data.
+If you have data that you want to be local to a thread, simply create
+a thread-local object and use its attributes:
+
+ >>> mydata = local()
+ >>> mydata.number = 42
+ >>> mydata.number
+ 42
+
+You can also access the local-object's dictionary:
+
+ >>> mydata.__dict__
+ {'number': 42}
+ >>> mydata.__dict__.setdefault('widgets', [])
+ []
+ >>> mydata.widgets
+ []
+
+What's important about thread-local objects is that their data are
+local to a thread. If we access the data in a different thread:
+
+ >>> log = []
+ >>> def f():
+ ... items = mydata.__dict__.items()
+ ... items.sort()
+ ... log.append(items)
+ ... mydata.number = 11
+ ... log.append(mydata.number)
+
+ >>> import threading
+ >>> thread = threading.Thread(target=f)
+ >>> thread.start()
+ >>> thread.join()
+ >>> log
+ [[], 11]
+
+we get different data. Furthermore, changes made in the other thread
+don't affect data seen in this thread:
+
+ >>> mydata.number
+ 42
+
+Of course, values you get from a local object, including a __dict__
+attribute, are for whatever thread was current at the time the
+attribute was read. For that reason, you generally don't want to save
+these values across threads, as they apply only to the thread they
+came from.
+
+You can create custom local objects by subclassing the local class:
+
+ >>> class MyLocal(local):
+ ... number = 2
+ ... initialized = False
+ ... def __init__(self, **kw):
+ ... if self.initialized:
+ ... raise SystemError('__init__ called too many times')
+ ... self.initialized = True
+ ... self.__dict__.update(kw)
+ ... def squared(self):
+ ... return self.number ** 2
+
+This can be useful to support default values, methods and
+initialization. Note that if you define an __init__ method, it will be
+called each time the local object is used in a separate thread. This
+is necessary to initialize each thread's dictionary.
+
+Now if we create a local object:
+
+ >>> mydata = MyLocal(color='red')
+
+Now we have a default number:
+
+ >>> mydata.number
+ 2
+
+an initial color:
+
+ >>> mydata.color
+ 'red'
+ >>> del mydata.color
+
+And a method that operates on the data:
+
+ >>> mydata.squared()
+ 4
+
+As before, we can access the data in a separate thread:
+
+ >>> log = []
+ >>> thread = threading.Thread(target=f)
+ >>> thread.start()
+ >>> thread.join()
+ >>> log
+ [[('color', 'red'), ('initialized', True)], 11]
+
+without affecting this thread's data:
+
+ >>> mydata.number
+ 2
+ >>> mydata.color
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'MyLocal' object has no attribute 'color'
+
+Note that subclasses can define slots, but they are not thread
+local. They are shared across threads:
+
+ >>> class MyLocal(local):
+ ... __slots__ = 'number'
+
+ >>> mydata = MyLocal()
+ >>> mydata.number = 42
+ >>> mydata.color = 'red'
+
+So, the separate thread:
+
+ >>> thread = threading.Thread(target=f)
+ >>> thread.start()
+ >>> thread.join()
+
+affects what we see:
+
+ >>> mydata.number
+ 11
+
+>>> del mydata
+"""
+
+__all__ = ["local"]
+
+# We need to use objects from the threading module, but the threading
+# module may also want to use our `local` class, if support for locals
+# isn't compiled in to the `thread` module. This creates potential problems
+# with circular imports. For that reason, we don't import `threading`
+# until the bottom of this file (a hack sufficient to worm around the
+# potential problems). Note that almost all platforms do have support for
+# locals in the `thread` module, and there is no circular import problem
+# then, so problems introduced by fiddling the order of imports here won't
+# manifest on most boxes.
+
+class _localbase(object):
+ __slots__ = '_local__key', '_local__args', '_local__lock'
+
+ def __new__(cls, *args, **kw):
+ self = object.__new__(cls)
+ key = '_local__key', 'thread.local.' + str(id(self))
+ object.__setattr__(self, '_local__key', key)
+ object.__setattr__(self, '_local__args', (args, kw))
+ object.__setattr__(self, '_local__lock', RLock())
+
+ if (args or kw) and (cls.__init__ is object.__init__):
+ raise TypeError("Initialization arguments are not supported")
+
+ # We need to create the thread dict in anticipation of
+ # __init__ being called, to make sure we don't call it
+ # again ourselves.
+ dict = object.__getattribute__(self, '__dict__')
+ currentThread().__dict__[key] = dict
+
+ return self
+
+def _patch(self):
+ key = object.__getattribute__(self, '_local__key')
+ d = currentThread().__dict__.get(key)
+ if d is None:
+ d = {}
+ currentThread().__dict__[key] = d
+ object.__setattr__(self, '__dict__', d)
+
+ # we have a new instance dict, so call out to __init__ if we have
+ # one
+ cls = type(self)
+ if cls.__init__ is not object.__init__:
+ args, kw = object.__getattribute__(self, '_local__args')
+ cls.__init__(self, *args, **kw)
+ else:
+ object.__setattr__(self, '__dict__', d)
+
+class local(_localbase):
+
+ def __getattribute__(self, name):
+ lock = object.__getattribute__(self, '_local__lock')
+ lock.acquire()
+ try:
+ _patch(self)
+ return object.__getattribute__(self, name)
+ finally:
+ lock.release()
+
+ def __setattr__(self, name, value):
+ lock = object.__getattribute__(self, '_local__lock')
+ lock.acquire()
+ try:
+ _patch(self)
+ return object.__setattr__(self, name, value)
+ finally:
+ lock.release()
+
+ def __delattr__(self, name):
+ lock = object.__getattribute__(self, '_local__lock')
+ lock.acquire()
+ try:
+ _patch(self)
+ return object.__delattr__(self, name)
+ finally:
+ lock.release()
+
+ def __del__(self):
+ import threading
+
+ key = object.__getattribute__(self, '_local__key')
+
+ try:
+ threads = list(threading.enumerate())
+ except:
+ # If enumerate fails, as it seems to do during
+ # shutdown, we'll skip cleanup under the assumption
+ # that there is nothing to clean up.
+ return
+
+ for thread in threads:
+ try:
+ __dict__ = thread.__dict__
+ except AttributeError:
+ # Thread is dying, rest in peace.
+ continue
+
+ if key in __dict__:
+ try:
+ del __dict__[key]
+ except KeyError:
+ pass # didn't have anything in this thread
+
+from threading import currentThread, RLock
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/aifc.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,961 @@
+"""Stuff to parse AIFF-C and AIFF files.
+
+Unless explicitly stated otherwise, the description below is true
+both for AIFF-C files and AIFF files.
+
+An AIFF-C file has the following structure.
+
+ +-----------------+
+ | FORM |
+ +-----------------+
+ | <size> |
+ +----+------------+
+ | | AIFC |
+ | +------------+
+ | | <chunks> |
+ | | . |
+ | | . |
+ | | . |
+ +----+------------+
+
+An AIFF file has the string "AIFF" instead of "AIFC".
+
+A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
+big endian order), followed by the data. The size field does not include
+the size of the 8 byte header.
+
+The following chunk types are recognized.
+
+ FVER
+ <version number of AIFF-C defining document> (AIFF-C only).
+ MARK
+ <# of markers> (2 bytes)
+ list of markers:
+ <marker ID> (2 bytes, must be > 0)
+ <position> (4 bytes)
+ <marker name> ("pstring")
+ COMM
+ <# of channels> (2 bytes)
+ <# of sound frames> (4 bytes)
+ <size of the samples> (2 bytes)
+ <sampling frequency> (10 bytes, IEEE 80-bit extended
+ floating point)
+ in AIFF-C files only:
+ <compression type> (4 bytes)
+ <human-readable version of compression type> ("pstring")
+ SSND
+ <offset> (4 bytes, not used by this program)
+ <blocksize> (4 bytes, not used by this program)
+ <sound data>
+
+A pstring consists of a 1 byte length, a string of characters, and 0 or 1
+byte pad to make the total length even.
+
+Usage.
+
+Reading AIFF files:
+ f = aifc.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+In some types of audio files, if the setpos() method is not used,
+the seek() method is not necessary.
+
+This returns an instance of a class with the following public methods:
+ getnchannels() -- returns number of audio channels (1 for
+ mono, 2 for stereo)
+ getsampwidth() -- returns sample width in bytes
+ getframerate() -- returns sampling frequency
+ getnframes() -- returns number of audio frames
+ getcomptype() -- returns compression type ('NONE' for AIFF files)
+ getcompname() -- returns human-readable version of
+ compression type ('not compressed' for AIFF files)
+ getparams() -- returns a tuple consisting of all of the
+ above in the above order
+ getmarkers() -- get the list of marks in the audio file or None
+ if there are no marks
+ getmark(id) -- get mark with the specified id (raises an error
+ if the mark does not exist)
+ readframes(n) -- returns at most n frames of audio
+ rewind() -- rewind to the beginning of the audio stream
+ setpos(pos) -- seek to the specified position
+ tell() -- return the current position
+ close() -- close the instance (make it unusable)
+The position returned by tell(), the position given to setpos() and
+the position of marks are all compatible and have nothing to do with
+the actual position in the file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing AIFF files:
+ f = aifc.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+
+This returns an instance of a class with the following public methods:
+ aiff() -- create an AIFF file (AIFF-C default)
+ aifc() -- create an AIFF-C file
+ setnchannels(n) -- set the number of channels
+ setsampwidth(n) -- set the sample width
+ setframerate(n) -- set the frame rate
+ setnframes(n) -- set the number of frames
+ setcomptype(type, name)
+ -- set the compression type and the
+ human-readable compression type
+ setparams(tuple)
+ -- set all parameters at once
+ setmark(id, pos, name)
+ -- add specified mark to the list of marks
+ tell() -- return current position in output file (useful
+ in combination with setmark())
+ writeframesraw(data)
+ -- write audio frames without patching up the
+ file header
+ writeframes(data)
+ -- write audio frames and patch up the file header
+ close() -- patch up the file header and close the
+ output file
+You should set the parameters before the first writeframesraw or
+writeframes. The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters, except possibly the
+compression type, and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes('') or
+close() to patch up the sizes in the header.
+Marks can be added anytime. If there are any marks, you must call
+close() after all frames have been written.
+The close() method is called automatically when the class instance
+is destroyed.
+
+When a file is opened with the extension '.aiff', an AIFF file is
+written, otherwise an AIFF-C file is written. This default can be
+changed by calling aiff() or aifc() before the first writeframes or
+writeframesraw.
+"""
+
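+# Illustrative sketch (an editorial addition, not part of the original
+# module): copy an AIFF-C file, assuming "in.aifc" exists.
+#
+# import aifc
+# fin = aifc.open("in.aifc", 'r')
+# fout = aifc.open("out.aifc", 'w')
+# fout.setparams(fin.getparams())
+# fout.writeframes(fin.readframes(fin.getnframes()))
+# fout.close(); fin.close()
+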
+import struct
+import __builtin__
+
+__all__ = ["Error","open","openfp"]
+
+class Error(Exception):
+ pass
+
+_AIFC_version = 0xA2805140L # Version 1 of AIFF-C
+
+_skiplist = 'COMT', 'INST', 'MIDI', 'AESD', \
+ 'APPL', 'NAME', 'AUTH', '(c) ', 'ANNO'
+
+def _read_long(file):
+ try:
+ return struct.unpack('>l', file.read(4))[0]
+ except struct.error:
+ raise EOFError
+
+def _read_ulong(file):
+ try:
+ return struct.unpack('>L', file.read(4))[0]
+ except struct.error:
+ raise EOFError
+
+def _read_short(file):
+ try:
+ return struct.unpack('>h', file.read(2))[0]
+ except struct.error:
+ raise EOFError
+
+def _read_string(file):
+ length = ord(file.read(1))
+ if length == 0:
+ data = ''
+ else:
+ data = file.read(length)
+ if length & 1 == 0:
+ dummy = file.read(1)
+ return data
+
+_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
+
+def _read_float(f): # 10 bytes
+ expon = _read_short(f) # 2 bytes
+ sign = 1
+ if expon < 0:
+ sign = -1
+ expon = expon + 0x8000
+ himant = _read_ulong(f) # 4 bytes
+ lomant = _read_ulong(f) # 4 bytes
+ if expon == himant == lomant == 0:
+ f = 0.0
+ elif expon == 0x7FFF:
+ f = _HUGE_VAL
+ else:
+ expon = expon - 16383
+ f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
+ return sign * f
+
+def _write_short(f, x):
+ f.write(struct.pack('>h', x))
+
+def _write_long(f, x):
+ f.write(struct.pack('>L', x))
+
+def _write_string(f, s):
+ if len(s) > 255:
+ raise ValueError("string exceeds maximum pstring length")
+ f.write(chr(len(s)))
+ f.write(s)
+ if len(s) & 1 == 0:
+ f.write(chr(0))
+
+def _write_float(f, x):
+ import math
+ if x < 0:
+ sign = 0x8000
+ x = x * -1
+ else:
+ sign = 0
+ if x == 0:
+ expon = 0
+ himant = 0
+ lomant = 0
+ else:
+ fmant, expon = math.frexp(x)
+ if expon > 16384 or fmant >= 1: # Infinity or NaN
+ expon = sign|0x7FFF
+ himant = 0
+ lomant = 0
+ else: # Finite
+ expon = expon + 16382
+ if expon < 0: # denormalized
+ fmant = math.ldexp(fmant, expon)
+ expon = 0
+ expon = expon | sign
+ fmant = math.ldexp(fmant, 32)
+ fsmant = math.floor(fmant)
+ himant = long(fsmant)
+ fmant = math.ldexp(fmant - fsmant, 32)
+ fsmant = math.floor(fmant)
+ lomant = long(fsmant)
+ _write_short(f, expon)
+ _write_long(f, himant)
+ _write_long(f, lomant)
+
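+# Round-trip sketch (an editorial addition): the IEEE 80-bit extended floats
+# used for the sampling frequency can be exercised with an in-memory file.
+#
+# from StringIO import StringIO
+# buf = StringIO()
+# _write_float(buf, 44100.0)
+# buf.seek(0)
+# _read_float(buf) # -> 44100.0
+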
+from chunk import Chunk
+
+class Aifc_read:
+ # Variables used in this class:
+ #
+ # These variables are available to the user though appropriate
+ # methods of this class:
+ # _file -- the open file with methods read(), close(), and seek()
+ # set through the __init__() method
+ # _nchannels -- the number of audio channels
+ # available through the getnchannels() method
+ # _nframes -- the number of audio frames
+ # available through the getnframes() method
+ # _sampwidth -- the number of bytes per audio sample
+ # available through the getsampwidth() method
+ # _framerate -- the sampling frequency
+ # available through the getframerate() method
+ # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
+ # available through the getcomptype() method
+ # _compname -- the human-readable AIFF-C compression type
+ # available through the getcomptype() method
+ # _markers -- the marks in the audio file
+ # available through the getmarkers() and getmark()
+ # methods
+ # _soundpos -- the position in the audio stream
+ # available through the tell() method, set through the
+ # setpos() method
+ #
+ # These variables are used internally only:
+ # _version -- the AIFF-C version number
+ # _decomp -- the decompressor from builtin module cl
+ # _comm_chunk_read -- 1 iff the COMM chunk has been read
+ # _aifc -- 1 iff reading an AIFF-C file
+ # _ssnd_seek_needed -- 1 iff positioned correctly in audio
+ # file for readframes()
+ # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
+ # _framesize -- size of one frame in the file
+
+ def initfp(self, file):
+ self._version = 0
+ self._decomp = None
+ self._convert = None
+ self._markers = []
+ self._soundpos = 0
+ self._file = Chunk(file)
+ if self._file.getname() != 'FORM':
+ raise Error, 'file does not start with FORM id'
+ formdata = self._file.read(4)
+ if formdata == 'AIFF':
+ self._aifc = 0
+ elif formdata == 'AIFC':
+ self._aifc = 1
+ else:
+ raise Error, 'not an AIFF or AIFF-C file'
+ self._comm_chunk_read = 0
+ while 1:
+ self._ssnd_seek_needed = 1
+ try:
+ chunk = Chunk(self._file)
+ except EOFError:
+ break
+ chunkname = chunk.getname()
+ if chunkname == 'COMM':
+ self._read_comm_chunk(chunk)
+ self._comm_chunk_read = 1
+ elif chunkname == 'SSND':
+ self._ssnd_chunk = chunk
+ dummy = chunk.read(8)
+ self._ssnd_seek_needed = 0
+ elif chunkname == 'FVER':
+ self._version = _read_ulong(chunk)
+ elif chunkname == 'MARK':
+ self._readmark(chunk)
+ elif chunkname in _skiplist:
+ pass
+ else:
+ raise Error, 'unrecognized chunk type '+chunk.chunkname
+ chunk.skip()
+ if not self._comm_chunk_read or not self._ssnd_chunk:
+ raise Error, 'COMM chunk and/or SSND chunk missing'
+ if self._aifc and self._decomp:
+ import cl
+ params = [cl.ORIGINAL_FORMAT, 0,
+ cl.BITS_PER_COMPONENT, self._sampwidth * 8,
+ cl.FRAME_RATE, self._framerate]
+ if self._nchannels == 1:
+ params[1] = cl.MONO
+ elif self._nchannels == 2:
+ params[1] = cl.STEREO_INTERLEAVED
+ else:
+ raise Error, 'cannot compress more than 2 channels'
+ self._decomp.SetParams(params)
+
+ def __init__(self, f):
+ if type(f) == type(''):
+ f = __builtin__.open(f, 'rb')
+ # else, assume it is an open file object already
+ self.initfp(f)
+
+ #
+ # User visible methods.
+ #
+ def getfp(self):
+ return self._file
+
+ def rewind(self):
+ self._ssnd_seek_needed = 1
+ self._soundpos = 0
+
+ def close(self):
+ if self._decomp:
+ self._decomp.CloseDecompressor()
+ self._decomp = None
+ self._file = None
+
+ def tell(self):
+ return self._soundpos
+
+ def getnchannels(self):
+ return self._nchannels
+
+ def getnframes(self):
+ return self._nframes
+
+ def getsampwidth(self):
+ return self._sampwidth
+
+ def getframerate(self):
+ return self._framerate
+
+ def getcomptype(self):
+ return self._comptype
+
+ def getcompname(self):
+ return self._compname
+
+## def getversion(self):
+## return self._version
+
+ def getparams(self):
+ return self.getnchannels(), self.getsampwidth(), \
+ self.getframerate(), self.getnframes(), \
+ self.getcomptype(), self.getcompname()
+
+ def getmarkers(self):
+ if len(self._markers) == 0:
+ return None
+ return self._markers
+
+ def getmark(self, id):
+ for marker in self._markers:
+ if id == marker[0]:
+ return marker
+ raise Error, 'marker %r does not exist' % (id,)
+
+ def setpos(self, pos):
+ if pos < 0 or pos > self._nframes:
+ raise Error, 'position not in range'
+ self._soundpos = pos
+ self._ssnd_seek_needed = 1
+
+ def readframes(self, nframes):
+ if self._ssnd_seek_needed:
+ self._ssnd_chunk.seek(0)
+ dummy = self._ssnd_chunk.read(8)
+ pos = self._soundpos * self._framesize
+ if pos:
+ self._ssnd_chunk.seek(pos + 8)
+ self._ssnd_seek_needed = 0
+ if nframes == 0:
+ return ''
+ data = self._ssnd_chunk.read(nframes * self._framesize)
+ if self._convert and data:
+ data = self._convert(data)
+ self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth)
+ return data
+
+ #
+ # Internal methods.
+ #
+
+ def _decomp_data(self, data):
+ import cl
+ dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
+ len(data) * 2)
+ return self._decomp.Decompress(len(data) / self._nchannels,
+ data)
+
+ def _ulaw2lin(self, data):
+ import audioop
+ return audioop.ulaw2lin(data, 2)
+
+ def _adpcm2lin(self, data):
+ import audioop
+ if not hasattr(self, '_adpcmstate'):
+ # first time
+ self._adpcmstate = None
+ data, self._adpcmstate = audioop.adpcm2lin(data, 2,
+ self._adpcmstate)
+ return data
+
+ def _read_comm_chunk(self, chunk):
+ self._nchannels = _read_short(chunk)
+ self._nframes = _read_long(chunk)
+ self._sampwidth = (_read_short(chunk) + 7) / 8
+ self._framerate = int(_read_float(chunk))
+ self._framesize = self._nchannels * self._sampwidth
+ if self._aifc:
+ #DEBUG: SGI's soundeditor produces a bad size :-(
+ kludge = 0
+ if chunk.chunksize == 18:
+ kludge = 1
+ print 'Warning: bad COMM chunk size'
+ chunk.chunksize = 23
+ #DEBUG end
+ self._comptype = chunk.read(4)
+ #DEBUG start
+ if kludge:
+ length = ord(chunk.file.read(1))
+ if length & 1 == 0:
+ length = length + 1
+ chunk.chunksize = chunk.chunksize + length
+ chunk.file.seek(-1, 1)
+ #DEBUG end
+ self._compname = _read_string(chunk)
+ if self._comptype != 'NONE':
+ if self._comptype == 'G722':
+ try:
+ import audioop
+ except ImportError:
+ pass
+ else:
+ self._convert = self._adpcm2lin
+ self._framesize = self._framesize / 4
+ return
+ # for ULAW and ALAW try Compression Library
+ try:
+ import cl
+ except ImportError:
+ if self._comptype == 'ULAW':
+ try:
+ import audioop
+ self._convert = self._ulaw2lin
+ self._framesize = self._framesize / 2
+ return
+ except ImportError:
+ pass
+ raise Error, 'cannot read compressed AIFF-C files'
+ if self._comptype == 'ULAW':
+ scheme = cl.G711_ULAW
+ self._framesize = self._framesize / 2
+ elif self._comptype == 'ALAW':
+ scheme = cl.G711_ALAW
+ self._framesize = self._framesize / 2
+ else:
+ raise Error, 'unsupported compression type'
+ self._decomp = cl.OpenDecompressor(scheme)
+ self._convert = self._decomp_data
+ else:
+ self._comptype = 'NONE'
+ self._compname = 'not compressed'
+
+ def _readmark(self, chunk):
+ nmarkers = _read_short(chunk)
+ # Some files appear to contain invalid counts.
+ # Cope with this by testing for EOF.
+ try:
+ for i in range(nmarkers):
+ id = _read_short(chunk)
+ pos = _read_long(chunk)
+ name = _read_string(chunk)
+ if pos or name:
+ # some files appear to have
+ # dummy markers consisting of
+ # a position 0 and name ''
+ self._markers.append((id, pos, name))
+ except EOFError:
+ print 'Warning: MARK chunk contains only',
+ print len(self._markers),
+ if len(self._markers) == 1: print 'marker',
+ else: print 'markers',
+ print 'instead of', nmarkers
+
+class Aifc_write:
+ # Variables used in this class:
+ #
+ # These variables are user settable through appropriate methods
+ # of this class:
+ # _file -- the open file with methods write(), close(), tell(), seek()
+ # set through the __init__() method
+ # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
+ # set through the setcomptype() or setparams() method
+ # _compname -- the human-readable AIFF-C compression type
+ # set through the setcomptype() or setparams() method
+ # _nchannels -- the number of audio channels
+ # set through the setnchannels() or setparams() method
+ # _sampwidth -- the number of bytes per audio sample
+ # set through the setsampwidth() or setparams() method
+ # _framerate -- the sampling frequency
+ # set through the setframerate() or setparams() method
+ # _nframes -- the number of audio frames written to the header
+ # set through the setnframes() or setparams() method
+ # _aifc -- whether we're writing an AIFF-C file or an AIFF file
+ # set through the aifc() method, reset through the
+ # aiff() method
+ #
+ # These variables are used internally only:
+ # _version -- the AIFF-C version number
+ # _comp -- the compressor from builtin module cl
+ # _nframeswritten -- the number of audio frames actually written
+ # _datalength -- the size of the audio samples written to the header
+ # _datawritten -- the size of the audio samples actually written
+
+ def __init__(self, f):
+ if type(f) == type(''):
+ filename = f
+ f = __builtin__.open(f, 'wb')
+ else:
+ # else, assume it is an open file object already
+ filename = '???'
+ self.initfp(f)
+ if filename[-5:] == '.aiff':
+ self._aifc = 0
+ else:
+ self._aifc = 1
+
+ def initfp(self, file):
+ self._file = file
+ self._version = _AIFC_version
+ self._comptype = 'NONE'
+ self._compname = 'not compressed'
+ self._comp = None
+ self._convert = None
+ self._nchannels = 0
+ self._sampwidth = 0
+ self._framerate = 0
+ self._nframes = 0
+ self._nframeswritten = 0
+ self._datawritten = 0
+ self._datalength = 0
+ self._markers = []
+ self._marklength = 0
+ self._aifc = 1 # AIFF-C is default
+
+ def __del__(self):
+ if self._file:
+ self.close()
+
+ #
+ # User visible methods.
+ #
+ def aiff(self):
+ if self._nframeswritten:
+ raise Error, 'cannot change parameters after starting to write'
+ self._aifc = 0
+
+ def aifc(self):
+ if self._nframeswritten:
+ raise Error, 'cannot change parameters after starting to write'
+ self._aifc = 1
+
+ def setnchannels(self, nchannels):
+ if self._nframeswritten:
+ raise Error, 'cannot change parameters after starting to write'
+ if nchannels < 1:
+ raise Error, 'bad # of channels'
+ self._nchannels = nchannels
+
+ def getnchannels(self):
+ if not self._nchannels:
+ raise Error, 'number of channels not set'
+ return self._nchannels
+
+ def setsampwidth(self, sampwidth):
+ if self._nframeswritten:
+ raise Error, 'cannot change parameters after starting to write'
+ if sampwidth < 1 or sampwidth > 4:
+ raise Error, 'bad sample width'
+ self._sampwidth = sampwidth
+
+ def getsampwidth(self):
+ if not self._sampwidth:
+ raise Error, 'sample width not set'
+ return self._sampwidth
+
+ def setframerate(self, framerate):
+ if self._nframeswritten:
+ raise Error, 'cannot change parameters after starting to write'
+ if framerate <= 0:
+ raise Error, 'bad frame rate'
+ self._framerate = framerate
+
+ def getframerate(self):
+ if not self._framerate:
+ raise Error, 'frame rate not set'
+ return self._framerate
+
+ def setnframes(self, nframes):
+ if self._nframeswritten:
+ raise Error, 'cannot change parameters after starting to write'
+ self._nframes = nframes
+
+ def getnframes(self):
+ return self._nframeswritten
+
+ def setcomptype(self, comptype, compname):
+ if self._nframeswritten:
+ raise Error, 'cannot change parameters after starting to write'
+ if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
+ raise Error, 'unsupported compression type'
+ self._comptype = comptype
+ self._compname = compname
+
+ def getcomptype(self):
+ return self._comptype
+
+ def getcompname(self):
+ return self._compname
+
+## def setversion(self, version):
+## if self._nframeswritten:
+## raise Error, 'cannot change parameters after starting to write'
+## self._version = version
+
+ def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
+ if self._nframeswritten:
+ raise Error, 'cannot change parameters after starting to write'
+ if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
+ raise Error, 'unsupported compression type'
+ self.setnchannels(nchannels)
+ self.setsampwidth(sampwidth)
+ self.setframerate(framerate)
+ self.setnframes(nframes)
+ self.setcomptype(comptype, compname)
+
+ def getparams(self):
+ if not self._nchannels or not self._sampwidth or not self._framerate:
+ raise Error, 'not all parameters set'
+ return self._nchannels, self._sampwidth, self._framerate, \
+ self._nframes, self._comptype, self._compname
+
+ def setmark(self, id, pos, name):
+ if id <= 0:
+ raise Error, 'marker ID must be > 0'
+ if pos < 0:
+ raise Error, 'marker position must be >= 0'
+ if type(name) != type(''):
+ raise Error, 'marker name must be a string'
+ for i in range(len(self._markers)):
+ if id == self._markers[i][0]:
+ self._markers[i] = id, pos, name
+ return
+ self._markers.append((id, pos, name))
+
+ def getmark(self, id):
+ for marker in self._markers:
+ if id == marker[0]:
+ return marker
+ raise Error, 'marker %r does not exist' % (id,)
+
+ def getmarkers(self):
+ if len(self._markers) == 0:
+ return None
+ return self._markers
+
+ def tell(self):
+ return self._nframeswritten
+
+ def writeframesraw(self, data):
+ self._ensure_header_written(len(data))
+ nframes = len(data) / (self._sampwidth * self._nchannels)
+ if self._convert:
+ data = self._convert(data)
+ self._file.write(data)
+ self._nframeswritten = self._nframeswritten + nframes
+ self._datawritten = self._datawritten + len(data)
+
+ def writeframes(self, data):
+ self.writeframesraw(data)
+ if self._nframeswritten != self._nframes or \
+ self._datalength != self._datawritten:
+ self._patchheader()
+
+ def close(self):
+ self._ensure_header_written(0)
+ if self._datawritten & 1:
+ # quick pad to even size
+ self._file.write(chr(0))
+ self._datawritten = self._datawritten + 1
+ self._writemarkers()
+ if self._nframeswritten != self._nframes or \
+ self._datalength != self._datawritten or \
+ self._marklength:
+ self._patchheader()
+ if self._comp:
+ self._comp.CloseCompressor()
+ self._comp = None
+ self._file.flush()
+ self._file = None
+
+ #
+ # Internal methods.
+ #
+
+ def _comp_data(self, data):
+ import cl
+ dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
+ dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
+ return self._comp.Compress(self._nframes, data)
+
+ def _lin2ulaw(self, data):
+ import audioop
+ return audioop.lin2ulaw(data, 2)
+
+ def _lin2adpcm(self, data):
+ import audioop
+ if not hasattr(self, '_adpcmstate'):
+ self._adpcmstate = None
+ data, self._adpcmstate = audioop.lin2adpcm(data, 2,
+ self._adpcmstate)
+ return data
+
+ def _ensure_header_written(self, datasize):
+ if not self._nframeswritten:
+ if self._comptype in ('ULAW', 'ALAW'):
+ if not self._sampwidth:
+ self._sampwidth = 2
+ if self._sampwidth != 2:
+ raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
+ if self._comptype == 'G722':
+ if not self._sampwidth:
+ self._sampwidth = 2
+ if self._sampwidth != 2:
+ raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
+ if not self._nchannels:
+ raise Error, '# channels not specified'
+ if not self._sampwidth:
+ raise Error, 'sample width not specified'
+ if not self._framerate:
+ raise Error, 'sampling rate not specified'
+ self._write_header(datasize)
+
+ def _init_compression(self):
+ if self._comptype == 'G722':
+ self._convert = self._lin2adpcm
+ return
+ try:
+ import cl
+ except ImportError:
+ if self._comptype == 'ULAW':
+ try:
+ import audioop
+ self._convert = self._lin2ulaw
+ return
+ except ImportError:
+ pass
+ raise Error, 'cannot write compressed AIFF-C files'
+ if self._comptype == 'ULAW':
+ scheme = cl.G711_ULAW
+ elif self._comptype == 'ALAW':
+ scheme = cl.G711_ALAW
+ else:
+ raise Error, 'unsupported compression type'
+ self._comp = cl.OpenCompressor(scheme)
+ params = [cl.ORIGINAL_FORMAT, 0,
+ cl.BITS_PER_COMPONENT, self._sampwidth * 8,
+ cl.FRAME_RATE, self._framerate,
+ cl.FRAME_BUFFER_SIZE, 100,
+ cl.COMPRESSED_BUFFER_SIZE, 100]
+ if self._nchannels == 1:
+ params[1] = cl.MONO
+ elif self._nchannels == 2:
+ params[1] = cl.STEREO_INTERLEAVED
+ else:
+ raise Error, 'cannot compress more than 2 channels'
+ self._comp.SetParams(params)
+ # the compressor produces a header which we ignore
+ dummy = self._comp.Compress(0, '')
+ self._convert = self._comp_data
+
+ def _write_header(self, initlength):
+ if self._aifc and self._comptype != 'NONE':
+ self._init_compression()
+ self._file.write('FORM')
+ if not self._nframes:
+ self._nframes = initlength / (self._nchannels * self._sampwidth)
+ self._datalength = self._nframes * self._nchannels * self._sampwidth
+ if self._datalength & 1:
+ self._datalength = self._datalength + 1
+ if self._aifc:
+ if self._comptype in ('ULAW', 'ALAW'):
+ self._datalength = self._datalength / 2
+ if self._datalength & 1:
+ self._datalength = self._datalength + 1
+ elif self._comptype == 'G722':
+ self._datalength = (self._datalength + 3) / 4
+ if self._datalength & 1:
+ self._datalength = self._datalength + 1
+ self._form_length_pos = self._file.tell()
+ commlength = self._write_form_length(self._datalength)
+ if self._aifc:
+ self._file.write('AIFC')
+ self._file.write('FVER')
+ _write_long(self._file, 4)
+ _write_long(self._file, self._version)
+ else:
+ self._file.write('AIFF')
+ self._file.write('COMM')
+ _write_long(self._file, commlength)
+ _write_short(self._file, self._nchannels)
+ self._nframes_pos = self._file.tell()
+ _write_long(self._file, self._nframes)
+ _write_short(self._file, self._sampwidth * 8)
+ _write_float(self._file, self._framerate)
+ if self._aifc:
+ self._file.write(self._comptype)
+ _write_string(self._file, self._compname)
+ self._file.write('SSND')
+ self._ssnd_length_pos = self._file.tell()
+ _write_long(self._file, self._datalength + 8)
+ _write_long(self._file, 0)
+ _write_long(self._file, 0)
+
+ def _write_form_length(self, datalength):
+ if self._aifc:
+ commlength = 18 + 5 + len(self._compname)
+ if commlength & 1:
+ commlength = commlength + 1
+ verslength = 12
+ else:
+ commlength = 18
+ verslength = 0
+ _write_long(self._file, 4 + verslength + self._marklength + \
+ 8 + commlength + 16 + datalength)
+ return commlength
+
+ def _patchheader(self):
+ curpos = self._file.tell()
+ if self._datawritten & 1:
+ datalength = self._datawritten + 1
+ self._file.write(chr(0))
+ else:
+ datalength = self._datawritten
+ if datalength == self._datalength and \
+ self._nframes == self._nframeswritten and \
+ self._marklength == 0:
+ self._file.seek(curpos, 0)
+ return
+ self._file.seek(self._form_length_pos, 0)
+ dummy = self._write_form_length(datalength)
+ self._file.seek(self._nframes_pos, 0)
+ _write_long(self._file, self._nframeswritten)
+ self._file.seek(self._ssnd_length_pos, 0)
+ _write_long(self._file, datalength + 8)
+ self._file.seek(curpos, 0)
+ self._nframes = self._nframeswritten
+ self._datalength = datalength
+
+ def _writemarkers(self):
+ if len(self._markers) == 0:
+ return
+ self._file.write('MARK')
+ length = 2
+ for marker in self._markers:
+ id, pos, name = marker
+ length = length + len(name) + 1 + 6
+ if len(name) & 1 == 0:
+ length = length + 1
+ _write_long(self._file, length)
+ self._marklength = length + 8
+ _write_short(self._file, len(self._markers))
+ for marker in self._markers:
+ id, pos, name = marker
+ _write_short(self._file, id)
+ _write_long(self._file, pos)
+ _write_string(self._file, name)
+
+def open(f, mode=None):
+ if mode is None:
+ if hasattr(f, 'mode'):
+ mode = f.mode
+ else:
+ mode = 'rb'
+ if mode in ('r', 'rb'):
+ return Aifc_read(f)
+ elif mode in ('w', 'wb'):
+ return Aifc_write(f)
+ else:
+ raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
+
+openfp = open # B/W compatibility
+
+if __name__ == '__main__':
+ import sys
+ if not sys.argv[1:]:
+ sys.argv.append('/usr/demos/data/audio/bach.aiff')
+ fn = sys.argv[1]
+ f = open(fn, 'r')
+ print "Reading", fn
+ print "nchannels =", f.getnchannels()
+ print "nframes =", f.getnframes()
+ print "sampwidth =", f.getsampwidth()
+ print "framerate =", f.getframerate()
+ print "comptype =", f.getcomptype()
+ print "compname =", f.getcompname()
+ if sys.argv[2:]:
+ gn = sys.argv[2]
+ print "Writing", gn
+ g = open(gn, 'w')
+ g.setparams(f.getparams())
+ while 1:
+ data = f.readframes(1024)
+ if not data:
+ break
+ g.writeframes(data)
+ g.close()
+ f.close()
+ print "Done."
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/anydbm.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,83 @@
+"""Generic interface to all dbm clones.
+
+Instead of
+
+ import dbm
+ d = dbm.open(file, 'w', 0666)
+
+use
+
+ import anydbm
+ d = anydbm.open(file, 'w')
+
+The returned object is a dbhash, gdbm, dbm or dumbdbm object,
+depending on the type of database being opened (determined by the
+whichdb module) in the case of an existing dbm. If the dbm does not
+exist and the create or new flag ('c' or 'n') was specified, the dbm
+type will be determined by the availability of the modules (tested in
+the above order).
+
+It has the following interface (key and data are strings):
+
+ d[key] = data # store data at key (may override data at
+ # existing key)
+ data = d[key] # retrieve data at key (raise KeyError if no
+ # such key)
+ del d[key] # delete data stored at key (raises KeyError
+ # if no such key)
+ flag = key in d # true if the key exists
+ list = d.keys() # return a list of all existing keys (slow!)
+
+Future versions may change the order in which implementations are
+tested for existence, or add interfaces to other dbm-like
+implementations.
+
+The open function has an optional second argument. This can be 'r',
+for read-only access, 'w', for read-write access of an existing
+database, 'c' for read-write access to a new or existing database, and
+'n' for read-write access to a new database. The default is 'r'.
+
+Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
+only if it doesn't exist; and 'n' always creates a new database.
+
+"""
+
+class error(Exception):
+ pass
+
+_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
+_errors = [error]
+_defaultmod = None
+
+for _name in _names:
+ try:
+ _mod = __import__(_name)
+ except ImportError:
+ continue
+ if not _defaultmod:
+ _defaultmod = _mod
+ _errors.append(_mod.error)
+
+if not _defaultmod:
+ raise ImportError, "no dbm clone found; tried %s" % _names
+
+error = tuple(_errors)
+
+def open(file, flag = 'r', mode = 0666):
+ # guess the type of an existing database
+ from whichdb import whichdb
+ result=whichdb(file)
+ if result is None:
+ # db doesn't exist
+ if 'c' in flag or 'n' in flag:
+ # file doesn't exist and the new
+ # flag was used so use default type
+ mod = _defaultmod
+ else:
+ raise error, "need 'c' or 'n' flag to open new db"
+ elif result == "":
+ # db type cannot be determined
+ raise error, "db type could not be determined"
+ else:
+ mod = __import__(result)
+ return mod.open(file, flag, mode)
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/asynchat.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,295 @@
+# -*- Mode: Python; tab-width: 4 -*-
+# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
+# Author: Sam Rushing <rushing@nightmare.com>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+# All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+r"""A class supporting chat-style (command/response) protocols.
+
+This class adds support for 'chat' style protocols - where one side
+sends a 'command', and the other sends a response (examples would be
+the common internet protocols - smtp, nntp, ftp, etc.).
+
+The handle_read() method looks at the input stream for the current
+'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
+for multi-line output), calling self.found_terminator() on its
+receipt.
+
+For example:
+Say you build an async nntp client using this class. At the start
+of the connection, you'll have self.terminator set to '\r\n', in
+order to process the single-line greeting. Just before issuing a
+'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
+command will be accumulated (using your own 'collect_incoming_data'
+method) up to the terminator, and then control will be returned to
+you - by calling your self.found_terminator() method.
+"""
+
+import socket
+import asyncore
+from collections import deque
+
+class async_chat (asyncore.dispatcher):
+ """This is an abstract class. You must derive from this class, and add
+ the two methods collect_incoming_data() and found_terminator()"""
+
+ # these are overridable defaults
+
+ ac_in_buffer_size = 4096
+ ac_out_buffer_size = 4096
+
+ def __init__ (self, conn=None):
+ self.ac_in_buffer = ''
+ self.ac_out_buffer = ''
+ self.producer_fifo = fifo()
+ asyncore.dispatcher.__init__ (self, conn)
+
+ def collect_incoming_data(self, data):
+ raise NotImplementedError, "must be implemented in subclass"
+
+ def found_terminator(self):
+ raise NotImplementedError, "must be implemented in subclass"
+
+ def set_terminator (self, term):
+ "Set the input delimiter. Can be a fixed string of any length, an integer, or None"
+ self.terminator = term
+
+ def get_terminator (self):
+ return self.terminator
+
+ # grab some more data from the socket,
+ # throw it to the collector method,
+ # check for the terminator,
+ # if found, transition to the next state.
+
+ def handle_read (self):
+
+ try:
+ data = self.recv (self.ac_in_buffer_size)
+ except socket.error, why:
+ self.handle_error()
+ return
+
+ self.ac_in_buffer = self.ac_in_buffer + data
+
+ # Continue to search for self.terminator in self.ac_in_buffer,
+ # while calling self.collect_incoming_data. The while loop
+ # is necessary because we might read several data+terminator
+ # combos with a single recv(1024).
+
+ while self.ac_in_buffer:
+ lb = len(self.ac_in_buffer)
+ terminator = self.get_terminator()
+ if not terminator:
+ # no terminator, collect it all
+ self.collect_incoming_data (self.ac_in_buffer)
+ self.ac_in_buffer = ''
+ elif isinstance(terminator, int) or isinstance(terminator, long):
+ # numeric terminator
+ n = terminator
+ if lb < n:
+ self.collect_incoming_data (self.ac_in_buffer)
+ self.ac_in_buffer = ''
+ self.terminator = self.terminator - lb
+ else:
+ self.collect_incoming_data (self.ac_in_buffer[:n])
+ self.ac_in_buffer = self.ac_in_buffer[n:]
+ self.terminator = 0
+ self.found_terminator()
+ else:
+ # 3 cases:
+ # 1) end of buffer matches terminator exactly:
+ # collect data, transition
+ # 2) end of buffer matches some prefix:
+ # collect data to the prefix
+ # 3) end of buffer does not match any prefix:
+ # collect data
+ terminator_len = len(terminator)
+ index = self.ac_in_buffer.find(terminator)
+ if index != -1:
+ # we found the terminator
+ if index > 0:
+ # don't bother reporting the empty string (source of subtle bugs)
+ self.collect_incoming_data (self.ac_in_buffer[:index])
+ self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
+ # This does the Right Thing if the terminator is changed here.
+ self.found_terminator()
+ else:
+ # check for a prefix of the terminator
+ index = find_prefix_at_end (self.ac_in_buffer, terminator)
+ if index:
+ if index != lb:
+ # we found a prefix, collect up to the prefix
+ self.collect_incoming_data (self.ac_in_buffer[:-index])
+ self.ac_in_buffer = self.ac_in_buffer[-index:]
+ break
+ else:
+ # no prefix, collect it all
+ self.collect_incoming_data (self.ac_in_buffer)
+ self.ac_in_buffer = ''
+
+ def handle_write (self):
+ self.initiate_send ()
+
+ def handle_close (self):
+ self.close()
+
+ def push (self, data):
+ self.producer_fifo.push (simple_producer (data))
+ self.initiate_send()
+
+ def push_with_producer (self, producer):
+ self.producer_fifo.push (producer)
+ self.initiate_send()
+
+ def readable (self):
+ "predicate for inclusion in the readable for select()"
+ return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
+
+ def writable (self):
+ "predicate for inclusion in the writable for select()"
+ # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
+ # this is about twice as fast, though not as clear.
+ return not (
+ (self.ac_out_buffer == '') and
+ self.producer_fifo.is_empty() and
+ self.connected
+ )
+
+ def close_when_done (self):
+ "automatically close this channel once the outgoing queue is empty"
+ self.producer_fifo.push (None)
+
+ # refill the outgoing buffer by calling the more() method
+ # of the first producer in the queue
+ def refill_buffer (self):
+ while 1:
+ if len(self.producer_fifo):
+ p = self.producer_fifo.first()
+ # a 'None' in the producer fifo is a sentinel,
+ # telling us to close the channel.
+ if p is None:
+ if not self.ac_out_buffer:
+ self.producer_fifo.pop()
+ self.close()
+ return
+ elif isinstance(p, str):
+ self.producer_fifo.pop()
+ self.ac_out_buffer = self.ac_out_buffer + p
+ return
+ data = p.more()
+ if data:
+ self.ac_out_buffer = self.ac_out_buffer + data
+ return
+ else:
+ self.producer_fifo.pop()
+ else:
+ return
+
+ def initiate_send (self):
+ obs = self.ac_out_buffer_size
+ # try to refill the buffer
+ if (len (self.ac_out_buffer) < obs):
+ self.refill_buffer()
+
+ if self.ac_out_buffer and self.connected:
+ # try to send the buffer
+ try:
+ num_sent = self.send (self.ac_out_buffer[:obs])
+ if num_sent:
+ self.ac_out_buffer = self.ac_out_buffer[num_sent:]
+
+ except socket.error, why:
+ self.handle_error()
+ return
+
+ def discard_buffers (self):
+ # Emergencies only!
+ self.ac_in_buffer = ''
+ self.ac_out_buffer = ''
+ while self.producer_fifo:
+ self.producer_fifo.pop()
+
+
+class simple_producer:
+
+ def __init__ (self, data, buffer_size=512):
+ self.data = data
+ self.buffer_size = buffer_size
+
+ def more (self):
+ if len (self.data) > self.buffer_size:
+ result = self.data[:self.buffer_size]
+ self.data = self.data[self.buffer_size:]
+ return result
+ else:
+ result = self.data
+ self.data = ''
+ return result
+
+class fifo:
+ def __init__ (self, list=None):
+ if not list:
+ self.list = deque()
+ else:
+ self.list = deque(list)
+
+ def __len__ (self):
+ return len(self.list)
+
+ def is_empty (self):
+ return not self.list
+
+ def first (self):
+ return self.list[0]
+
+ def push (self, data):
+ self.list.append(data)
+
+ def pop (self):
+ if self.list:
+ return (1, self.list.popleft())
+ else:
+ return (0, None)
+
+# Given 'haystack', see if any prefix of 'needle' is at its end. This
+# assumes an exact match has already been checked. Return the number of
+# characters matched.
+# for example:
+# f_p_a_e ("qwerty\r", "\r\n") => 1
+# f_p_a_e ("qwertydkjf", "\r\n") => 0
+# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
+
+# this could maybe be made faster with a computed regex?
+# [answer: no; circa Python-2.0, Jan 2001]
+# new python: 28961/s
+# old python: 18307/s
+# re: 12820/s
+# regex: 14035/s
+
+def find_prefix_at_end (haystack, needle):
+ l = len(needle) - 1
+ while l and not haystack.endswith(needle[:l]):
+ l -= 1
+ return l
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/asyncore.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,551 @@
+# -*- Mode: Python -*-
+# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
+# Author: Sam Rushing <rushing@nightmare.com>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+# All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+"""Basic infrastructure for asynchronous socket service clients and servers.
+
+There are only two ways to have a program on a single processor do "more
+than one thing at a time". Multi-threaded programming is the simplest and
+most popular way to do it, but there is another very different technique,
+that lets you have nearly all the advantages of multi-threading, without
+actually using multiple threads. It's really only practical if your program
+is largely I/O bound. If your program is CPU bound, then pre-emptive
+scheduled threads are probably what you really need. Network servers are
+rarely CPU-bound, however.
+
+If your operating system supports the select() system call in its I/O
+library (and nearly all do), then you can use it to juggle multiple
+communication channels at once; doing other work while your I/O is taking
+place in the "background." Although this strategy can seem strange and
+complex, especially at first, it is in many ways easier to understand and
+control than multi-threaded programming. The module documented here solves
+many of the difficult problems for you, making the task of building
+sophisticated high-performance network servers and clients a snap.
+"""
+
+import select
+import socket
+import sys
+import time
+
+import os
+from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
+ ENOTCONN, ESHUTDOWN, EINTR, EISCONN, errorcode
+
+try:
+ socket_map
+except NameError:
+ socket_map = {}
+
+class ExitNow(Exception):
+ pass
+
+def read(obj):
+ try:
+ obj.handle_read_event()
+ except ExitNow:
+ raise
+ except:
+ obj.handle_error()
+
+def write(obj):
+ try:
+ obj.handle_write_event()
+ except ExitNow:
+ raise
+ except:
+ obj.handle_error()
+
+def _exception (obj):
+ try:
+ obj.handle_expt_event()
+ except ExitNow:
+ raise
+ except:
+ obj.handle_error()
+
+def readwrite(obj, flags):
+ try:
+ if flags & (select.POLLIN | select.POLLPRI):
+ obj.handle_read_event()
+ if flags & select.POLLOUT:
+ obj.handle_write_event()
+ if flags & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
+ obj.handle_expt_event()
+ except ExitNow:
+ raise
+ except:
+ obj.handle_error()
+
+def poll(timeout=0.0, map=None):
+ if map is None:
+ map = socket_map
+ if map:
+ r = []; w = []; e = []
+ for fd, obj in map.items():
+ is_r = obj.readable()
+ is_w = obj.writable()
+ if is_r:
+ r.append(fd)
+ if is_w:
+ w.append(fd)
+ if is_r or is_w:
+ e.append(fd)
+ if [] == r == w == e:
+ time.sleep(timeout)
+ else:
+ try:
+ r, w, e = select.select(r, w, e, timeout)
+ except select.error, err:
+ if err[0] != EINTR:
+ raise
+ else:
+ return
+
+ for fd in r:
+ obj = map.get(fd)
+ if obj is None:
+ continue
+ read(obj)
+
+ for fd in w:
+ obj = map.get(fd)
+ if obj is None:
+ continue
+ write(obj)
+
+ for fd in e:
+ obj = map.get(fd)
+ if obj is None:
+ continue
+ _exception(obj)
+
+def poll2(timeout=0.0, map=None):
+ # Use the poll() support added to the select module in Python 2.0
+ if map is None:
+ map = socket_map
+ if timeout is not None:
+ # timeout is in milliseconds
+ timeout = int(timeout*1000)
+ pollster = select.poll()
+ if map:
+ for fd, obj in map.items():
+ flags = 0
+ if obj.readable():
+ flags |= select.POLLIN | select.POLLPRI
+ if obj.writable():
+ flags |= select.POLLOUT
+ if flags:
+ # Only check for exceptions if object was either readable
+ # or writable.
+ flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
+ pollster.register(fd, flags)
+ try:
+ r = pollster.poll(timeout)
+ except select.error, err:
+ if err[0] != EINTR:
+ raise
+ r = []
+ for fd, flags in r:
+ obj = map.get(fd)
+ if obj is None:
+ continue
+ readwrite(obj, flags)
+
+poll3 = poll2 # Alias for backward compatibility
+
+def loop(timeout=30.0, use_poll=False, map=None, count=None):
+ if map is None:
+ map = socket_map
+
+ if use_poll and hasattr(select, 'poll'):
+ poll_fun = poll2
+ else:
+ poll_fun = poll
+
+ if count is None:
+ while map:
+ poll_fun(timeout, map)
+
+ else:
+ while map and count > 0:
+ poll_fun(timeout, map)
+ count = count - 1
+
+class dispatcher:
+
+ debug = False
+ connected = False
+ accepting = False
+ closing = False
+ addr = None
+
+ def __init__(self, sock=None, map=None):
+ if map is None:
+ self._map = socket_map
+ else:
+ self._map = map
+
+ if sock:
+ self.set_socket(sock, map)
+ # I think it should inherit this anyway
+ self.socket.setblocking(0)
+ self.connected = True
+ # XXX Does the constructor require that the socket passed
+ # be connected?
+ try:
+ self.addr = sock.getpeername()
+ except socket.error:
+ # The addr isn't crucial
+ pass
+ else:
+ self.socket = None
+
+ def __repr__(self):
+ status = [self.__class__.__module__+"."+self.__class__.__name__]
+ if self.accepting and self.addr:
+ status.append('listening')
+ elif self.connected:
+ status.append('connected')
+ if self.addr is not None:
+ try:
+ status.append('%s:%d' % self.addr)
+ except TypeError:
+ status.append(repr(self.addr))
+ return '<%s at %#x>' % (' '.join(status), id(self))
+
+ def add_channel(self, map=None):
+ #self.log_info('adding channel %s' % self)
+ if map is None:
+ map = self._map
+ map[self._fileno] = self
+
+ def del_channel(self, map=None):
+ fd = self._fileno
+ if map is None:
+ map = self._map
+ if map.has_key(fd):
+ #self.log_info('closing channel %d:%s' % (fd, self))
+ del map[fd]
+ self._fileno = None
+
+ def create_socket(self, family, type):
+ self.family_and_type = family, type
+ self.socket = socket.socket(family, type)
+ self.socket.setblocking(0)
+ self._fileno = self.socket.fileno()
+ self.add_channel()
+
+ def set_socket(self, sock, map=None):
+ self.socket = sock
+## self.__dict__['socket'] = sock
+ self._fileno = sock.fileno()
+ self.add_channel(map)
+
+ def set_reuse_addr(self):
+ # try to re-use a server port if possible
+ try:
+ self.socket.setsockopt(
+ socket.SOL_SOCKET, socket.SO_REUSEADDR,
+ self.socket.getsockopt(socket.SOL_SOCKET,
+ socket.SO_REUSEADDR) | 1
+ )
+ except socket.error:
+ pass
+
+ # ==================================================
+ # predicates for select()
+ # these are used as filters for the lists of sockets
+ # to pass to select().
+ # ==================================================
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ return True
+
+ # ==================================================
+ # socket object methods.
+ # ==================================================
+
+ def listen(self, num):
+ self.accepting = True
+ if os.name == 'nt' and num > 5:
+ num = 1
+ return self.socket.listen(num)
+
+ def bind(self, addr):
+ self.addr = addr
+ return self.socket.bind(addr)
+
+ def connect(self, address):
+ self.connected = False
+ err = self.socket.connect_ex(address)
+ # XXX Should interpret Winsock return values
+ if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
+ return
+ if err in (0, EISCONN):
+ self.addr = address
+ self.connected = True
+ self.handle_connect()
+ else:
+ raise socket.error, (err, errorcode[err])
+
+ def accept(self):
+ # XXX can return either an address pair or None
+ try:
+ conn, addr = self.socket.accept()
+ return conn, addr
+ except socket.error, why:
+ if why[0] == EWOULDBLOCK:
+ pass
+ else:
+ raise
+
+ def send(self, data):
+ try:
+ result = self.socket.send(data)
+ return result
+ except socket.error, why:
+ if why[0] == EWOULDBLOCK:
+ return 0
+ else:
+ raise
+ return 0
+
+ def recv(self, buffer_size):
+ try:
+ data = self.socket.recv(buffer_size)
+ if not data:
+ # a closed connection is indicated by signaling
+ # a read condition, and having recv() return 0.
+ self.handle_close()
+ return ''
+ else:
+ return data
+ except socket.error, why:
+ # winsock sometimes throws ENOTCONN
+ if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
+ self.handle_close()
+ return ''
+ else:
+ raise
+
+ def close(self):
+ self.del_channel()
+ self.socket.close()
+
+ # cheap inheritance, used to pass all other attribute
+ # references to the underlying socket object.
+ def __getattr__(self, attr):
+ return getattr(self.socket, attr)
+
+ # log and log_info may be overridden to provide more sophisticated
+ # logging and warning methods. In general, log is for 'hit' logging
+ # and 'log_info' is for informational, warning and error logging.
+
+ def log(self, message):
+ sys.stderr.write('log: %s\n' % str(message))
+
+ def log_info(self, message, type='info'):
+ if __debug__ or type != 'info':
+ print '%s: %s' % (type, message)
+
+ def handle_read_event(self):
+ if self.accepting:
+ # for an accepting socket, getting a read implies
+ # that we are connected
+ if not self.connected:
+ self.connected = True
+ self.handle_accept()
+ elif not self.connected:
+ self.handle_connect()
+ self.connected = True
+ self.handle_read()
+ else:
+ self.handle_read()
+
+ def handle_write_event(self):
+ # getting a write implies that we are connected
+ if not self.connected:
+ self.handle_connect()
+ self.connected = True
+ self.handle_write()
+
+ def handle_expt_event(self):
+ self.handle_expt()
+
+ def handle_error(self):
+ nil, t, v, tbinfo = compact_traceback()
+
+ # sometimes a user repr method will crash.
+ try:
+ self_repr = repr(self)
+ except:
+ self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
+
+ self.log_info(
+ 'uncaptured python exception, closing channel %s (%s:%s %s)' % (
+ self_repr,
+ t,
+ v,
+ tbinfo
+ ),
+ 'error'
+ )
+ self.close()
+
+ def handle_expt(self):
+ self.log_info('unhandled exception', 'warning')
+
+ def handle_read(self):
+ self.log_info('unhandled read event', 'warning')
+
+ def handle_write(self):
+ self.log_info('unhandled write event', 'warning')
+
+ def handle_connect(self):
+ self.log_info('unhandled connect event', 'warning')
+
+ def handle_accept(self):
+ self.log_info('unhandled accept event', 'warning')
+
+ def handle_close(self):
+ self.log_info('unhandled close event', 'warning')
+ self.close()
+
+# ---------------------------------------------------------------------------
+# adds simple buffered output capability, useful for simple clients.
+# [for more sophisticated usage use asynchat.async_chat]
+# ---------------------------------------------------------------------------
+
+class dispatcher_with_send(dispatcher):
+
+ def __init__(self, sock=None, map=None):
+ dispatcher.__init__(self, sock, map)
+ self.out_buffer = ''
+
+ def initiate_send(self):
+ num_sent = 0
+ num_sent = dispatcher.send(self, self.out_buffer[:512])
+ self.out_buffer = self.out_buffer[num_sent:]
+
+ def handle_write(self):
+ self.initiate_send()
+
+ def writable(self):
+ return (not self.connected) or len(self.out_buffer)
+
+ def send(self, data):
+ if self.debug:
+ self.log_info('sending %s' % repr(data))
+ self.out_buffer = self.out_buffer + data
+ self.initiate_send()
+
+# ---------------------------------------------------------------------------
+# used for debugging.
+# ---------------------------------------------------------------------------
+
+def compact_traceback():
+ t, v, tb = sys.exc_info()
+ tbinfo = []
+ assert tb # Must have a traceback
+ while tb:
+ tbinfo.append((
+ tb.tb_frame.f_code.co_filename,
+ tb.tb_frame.f_code.co_name,
+ str(tb.tb_lineno)
+ ))
+ tb = tb.tb_next
+
+ # just to be safe
+ del tb
+
+ file, function, line = tbinfo[-1]
+ info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
+ return (file, function, line), t, v, info
+
+def close_all(map=None):
+ if map is None:
+ map = socket_map
+ for x in map.values():
+ x.socket.close()
+ map.clear()
+
+# Asynchronous File I/O:
+#
+# After a little research (reading man pages on various unixen, and
+# digging through the linux kernel), I've determined that select()
+# isn't meant for doing asynchronous file i/o.
+# Heartening, though - reading linux/mm/filemap.c shows that linux
+# supports asynchronous read-ahead. So _MOST_ of the time, the data
+# will be sitting in memory for us already when we go to read it.
+#
+# What other OS's (besides NT) support async file i/o? [VMS?]
+#
+# Regardless, this is useful for pipes, and stdin/stdout...
+
+if os.name == 'posix':
+ import fcntl
+
+ class file_wrapper:
+ # here we override just enough to make a file
+ # look like a socket for the purposes of asyncore.
+
+ def __init__(self, fd):
+ self.fd = fd
+
+ def recv(self, *args):
+ return os.read(self.fd, *args)
+
+ def send(self, *args):
+ return os.write(self.fd, *args)
+
+ read = recv
+ write = send
+
+ def close(self):
+ os.close(self.fd)
+
+ def fileno(self):
+ return self.fd
+
+ class file_dispatcher(dispatcher):
+
+ def __init__(self, fd, map=None):
+ dispatcher.__init__(self, None, map)
+ self.connected = True
+ self.set_file(fd)
+ # set it to non-blocking mode
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
+ flags = flags | os.O_NONBLOCK
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+ def set_file(self, fd):
+ self._fileno = fd
+ self.socket = file_wrapper(fd)
+ self.add_channel()
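+
+    # Usage sketch: wrap an already-open pipe or standard input (fd 0,
+    # illustrative) so asyncore can service it alongside sockets; note
+    # that the constructor puts the descriptor into non-blocking mode:
+    #
+    #     class stdin_watcher(file_dispatcher):
+    #         def handle_read(self):
+    #             print 'read:', repr(self.recv(512))
+    #
+    #     stdin_watcher(0)
+    #     loop()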
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/atexit.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,62 @@
+"""
+atexit.py - allow programmer to define multiple exit functions to be executed
+upon normal program termination.
+
+One public function, register, is defined.
+"""
+
+__all__ = ["register"]
+
+import sys
+
+_exithandlers = []
+def _run_exitfuncs():
+ """run any registered exit functions
+
+ _exithandlers is traversed in reverse order so functions are executed
+ last in, first out.
+ """
+
+ exc_info = None
+ while _exithandlers:
+ func, targs, kargs = _exithandlers.pop()
+ try:
+ func(*targs, **kargs)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ except:
+ import traceback
+ print >> sys.stderr, "Error in atexit._run_exitfuncs:"
+ traceback.print_exc()
+ exc_info = sys.exc_info()
+
+ if exc_info is not None:
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+
+def register(func, *targs, **kargs):
+ """register a function to be executed upon normal program termination
+
+ func - function to be called at exit
+ targs - optional arguments to pass to func
+ kargs - optional keyword arguments to pass to func
+ """
+ _exithandlers.append((func, targs, kargs))
+
+if hasattr(sys, "exitfunc"):
+ # Assume it's another registered exit function - append it to our list
+ register(sys.exitfunc)
+sys.exitfunc = _run_exitfuncs
+
+if __name__ == "__main__":
+ def x1():
+ print "running x1"
+ def x2(n):
+ print "running x2(%r)" % (n,)
+ def x3(n, kwd=None):
+ print "running x3(%r, kwd=%r)" % (n, kwd)
+
+ register(x1)
+ register(x2, 12)
+ register(x3, 5, "bar")
+ register(x3, "no kwd args")
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/audiodev.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,257 @@
+"""Classes for manipulating audio devices (currently only for Sun and SGI)"""
+
+__all__ = ["error","AudioDev"]
+
+class error(Exception):
+ pass
+
+class Play_Audio_sgi:
+ # Private instance variables
+## if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
+## params, config, inited_outrate, inited_width, \
+## inited_nchannels, port, converter, classinited: private
+
+ classinited = 0
+ frameratelist = nchannelslist = sampwidthlist = None
+
+ def initclass(self):
+ import AL
+ self.frameratelist = [
+ (48000, AL.RATE_48000),
+ (44100, AL.RATE_44100),
+ (32000, AL.RATE_32000),
+ (22050, AL.RATE_22050),
+ (16000, AL.RATE_16000),
+ (11025, AL.RATE_11025),
+ ( 8000, AL.RATE_8000),
+ ]
+ self.nchannelslist = [
+ (1, AL.MONO),
+ (2, AL.STEREO),
+ (4, AL.QUADRO),
+ ]
+ self.sampwidthlist = [
+ (1, AL.SAMPLE_8),
+ (2, AL.SAMPLE_16),
+ (3, AL.SAMPLE_24),
+ ]
+ self.classinited = 1
+
+ def __init__(self):
+ import al, AL
+ if not self.classinited:
+ self.initclass()
+ self.oldparams = []
+ self.params = [AL.OUTPUT_RATE, 0]
+ self.config = al.newconfig()
+ self.inited_outrate = 0
+ self.inited_width = 0
+ self.inited_nchannels = 0
+ self.converter = None
+ self.port = None
+ return
+
+ def __del__(self):
+ if self.port:
+ self.stop()
+ if self.oldparams:
+ import al, AL
+ al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
+ self.oldparams = []
+
+ def wait(self):
+ if not self.port:
+ return
+ import time
+ while self.port.getfilled() > 0:
+ time.sleep(0.1)
+ self.stop()
+
+ def stop(self):
+ if self.port:
+ self.port.closeport()
+ self.port = None
+ if self.oldparams:
+ import al, AL
+ al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
+ self.oldparams = []
+
+ def setoutrate(self, rate):
+ for (raw, cooked) in self.frameratelist:
+ if rate == raw:
+ self.params[1] = cooked
+ self.inited_outrate = 1
+ break
+ else:
+ raise error, 'bad output rate'
+
+ def setsampwidth(self, width):
+ for (raw, cooked) in self.sampwidthlist:
+ if width == raw:
+ self.config.setwidth(cooked)
+ self.inited_width = 1
+ break
+ else:
+ if width == 0:
+ import AL
+ self.inited_width = 0
+ self.config.setwidth(AL.SAMPLE_16)
+ self.converter = self.ulaw2lin
+ else:
+ raise error, 'bad sample width'
+
+ def setnchannels(self, nchannels):
+ for (raw, cooked) in self.nchannelslist:
+ if nchannels == raw:
+ self.config.setchannels(cooked)
+ self.inited_nchannels = 1
+ break
+ else:
+ raise error, 'bad # of channels'
+
+ def writeframes(self, data):
+ if not (self.inited_outrate and self.inited_nchannels):
+ raise error, 'params not specified'
+ if not self.port:
+ import al, AL
+ self.port = al.openport('Python', 'w', self.config)
+ self.oldparams = self.params[:]
+ al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
+ al.setparams(AL.DEFAULT_DEVICE, self.params)
+ if self.converter:
+ data = self.converter(data)
+ self.port.writesamps(data)
+
+ def getfilled(self):
+ if self.port:
+ return self.port.getfilled()
+ else:
+ return 0
+
+ def getfillable(self):
+ if self.port:
+ return self.port.getfillable()
+ else:
+ return self.config.getqueuesize()
+
+ # private methods
+## if 0: access *: private
+
+ def ulaw2lin(self, data):
+ import audioop
+ return audioop.ulaw2lin(data, 2)
+
+class Play_Audio_sun:
+## if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
+## inited_nchannels, converter: private
+
+ def __init__(self):
+ self.outrate = 0
+ self.sampwidth = 0
+ self.nchannels = 0
+ self.inited_outrate = 0
+ self.inited_width = 0
+ self.inited_nchannels = 0
+ self.converter = None
+ self.port = None
+ return
+
+ def __del__(self):
+ self.stop()
+
+ def setoutrate(self, rate):
+ self.outrate = rate
+ self.inited_outrate = 1
+
+ def setsampwidth(self, width):
+ self.sampwidth = width
+ self.inited_width = 1
+
+ def setnchannels(self, nchannels):
+ self.nchannels = nchannels
+ self.inited_nchannels = 1
+
+ def writeframes(self, data):
+ if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
+ raise error, 'params not specified'
+ if not self.port:
+ import sunaudiodev, SUNAUDIODEV
+ self.port = sunaudiodev.open('w')
+ info = self.port.getinfo()
+ info.o_sample_rate = self.outrate
+ info.o_channels = self.nchannels
+ if self.sampwidth == 0:
+ info.o_precision = 8
+                info.o_encoding = SUNAUDIODEV.ENCODING_ULAW
+ # XXX Hack, hack -- leave defaults
+ else:
+ info.o_precision = 8 * self.sampwidth
+ info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
+ self.port.setinfo(info)
+ if self.converter:
+ data = self.converter(data)
+ self.port.write(data)
+
+ def wait(self):
+ if not self.port:
+ return
+ self.port.drain()
+ self.stop()
+
+ def stop(self):
+ if self.port:
+ self.port.flush()
+ self.port.close()
+ self.port = None
+
+ def getfilled(self):
+ if self.port:
+ return self.port.obufcount()
+ else:
+ return 0
+
+## # Nobody remembers what this method does, and it's broken. :-(
+## def getfillable(self):
+## return BUFFERSIZE - self.getfilled()
+
+def AudioDev():
+ # Dynamically try to import and use a platform specific module.
+ try:
+ import al
+ except ImportError:
+ try:
+ import sunaudiodev
+ return Play_Audio_sun()
+ except ImportError:
+ try:
+ import Audio_mac
+ except ImportError:
+ raise error, 'no audio device'
+ else:
+ return Audio_mac.Play_Audio_mac()
+ else:
+ return Play_Audio_sgi()
+
+def test(fn = None):
+ import sys
+ if sys.argv[1:]:
+ fn = sys.argv[1]
+ else:
+ fn = 'f:just samples:just.aif'
+ import aifc
+ af = aifc.open(fn, 'r')
+ print fn, af.getparams()
+ p = AudioDev()
+ p.setoutrate(af.getframerate())
+ p.setsampwidth(af.getsampwidth())
+ p.setnchannels(af.getnchannels())
+ BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
+ while 1:
+ data = af.readframes(BUFSIZ)
+ if not data: break
+ print len(data)
+ p.writeframes(data)
+ p.wait()
+
+if __name__ == '__main__':
+ test()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/base64.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,359 @@
+#! /usr/bin/env python
+
+"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
+
+# Modified 04-Oct-1995 by Jack Jansen to use binascii module
+# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
+
+import re
+import struct
+import binascii
+
+
+__all__ = [
+ # Legacy interface exports traditional RFC 1521 Base64 encodings
+ 'encode', 'decode', 'encodestring', 'decodestring',
+ # Generalized interface for other encodings
+ 'b64encode', 'b64decode', 'b32encode', 'b32decode',
+ 'b16encode', 'b16decode',
+ # Standard Base64 encoding
+ 'standard_b64encode', 'standard_b64decode',
+ # Some common Base64 alternatives. As referenced by RFC 3458, see thread
+ # starting at:
+ #
+ # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
+ 'urlsafe_b64encode', 'urlsafe_b64decode',
+ ]
+
+_translation = [chr(_x) for _x in range(256)]
+EMPTYSTRING = ''
+
+
+def _translate(s, altchars):
+ translation = _translation[:]
+ for k, v in altchars.items():
+ translation[ord(k)] = v
+ return s.translate(''.join(translation))
+
+
+
+# Base64 encoding/decoding uses binascii
+
+def b64encode(s, altchars=None):
+ """Encode a string using Base64.
+
+ s is the string to encode. Optional altchars must be a string of at least
+ length 2 (additional characters are ignored) which specifies an
+ alternative alphabet for the '+' and '/' characters. This allows an
+ application to e.g. generate url or filesystem safe Base64 strings.
+
+ The encoded string is returned.
+ """
+ # Strip off the trailing newline
+ encoded = binascii.b2a_base64(s)[:-1]
+ if altchars is not None:
+ return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
+ return encoded
+
+
+def b64decode(s, altchars=None):
+ """Decode a Base64 encoded string.
+
+ s is the string to decode. Optional altchars must be a string of at least
+ length 2 (additional characters are ignored) which specifies the
+ alternative alphabet used instead of the '+' and '/' characters.
+
+    The decoded string is returned. A TypeError is raised if s is
+ incorrectly padded or if there are non-alphabet characters present in the
+ string.
+ """
+ if altchars is not None:
+ s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
+ try:
+ return binascii.a2b_base64(s)
+ except binascii.Error, msg:
+ # Transform this exception for consistency
+ raise TypeError(msg)
+
+
+def standard_b64encode(s):
+ """Encode a string using the standard Base64 alphabet.
+
+ s is the string to encode. The encoded string is returned.
+ """
+ return b64encode(s)
+
+def standard_b64decode(s):
+ """Decode a string encoded with the standard Base64 alphabet.
+
+ s is the string to decode. The decoded string is returned. A TypeError
+ is raised if the string is incorrectly padded or if there are non-alphabet
+ characters present in the string.
+ """
+ return b64decode(s)
+
+def urlsafe_b64encode(s):
+ """Encode a string using a url-safe Base64 alphabet.
+
+ s is the string to encode. The encoded string is returned. The alphabet
+ uses '-' instead of '+' and '_' instead of '/'.
+ """
+ return b64encode(s, '-_')
+
+def urlsafe_b64decode(s):
+ """Decode a string encoded with the standard Base64 alphabet.
+
+ s is the string to decode. The decoded string is returned. A TypeError
+ is raised if the string is incorrectly padded or if there are non-alphabet
+ characters present in the string.
+
+ The alphabet uses '-' instead of '+' and '_' instead of '/'.
+ """
+ return b64decode(s, '-_')
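+
+# Doctest-style sketch, reusing the sample string from test1() below:
+#
+#     >>> b64encode('Aladdin:open sesame')
+#     'QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
+#     >>> b64decode('QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
+#     'Aladdin:open sesame'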
+
+
+
+# Base32 encoding/decoding must be done in Python
+_b32alphabet = {
+ 0: 'A', 9: 'J', 18: 'S', 27: '3',
+ 1: 'B', 10: 'K', 19: 'T', 28: '4',
+ 2: 'C', 11: 'L', 20: 'U', 29: '5',
+ 3: 'D', 12: 'M', 21: 'V', 30: '6',
+ 4: 'E', 13: 'N', 22: 'W', 31: '7',
+ 5: 'F', 14: 'O', 23: 'X',
+ 6: 'G', 15: 'P', 24: 'Y',
+ 7: 'H', 16: 'Q', 25: 'Z',
+ 8: 'I', 17: 'R', 26: '2',
+ }
+
+_b32tab = _b32alphabet.items()
+_b32tab.sort()
+_b32tab = [v for k, v in _b32tab]
+_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
+
+
+def b32encode(s):
+ """Encode a string using Base32.
+
+ s is the string to encode. The encoded string is returned.
+ """
+ parts = []
+ quanta, leftover = divmod(len(s), 5)
+ # Pad the last quantum with zero bits if necessary
+ if leftover:
+ s += ('\0' * (5 - leftover))
+ quanta += 1
+ for i in range(quanta):
+ # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
+ # code is to process the 40 bits in units of 5 bits. So we take the 1
+ # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
+ # bits of c2 and tack them onto c3. The shifts and masks are intended
+ # to give us values of exactly 5 bits in width.
+ c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
+ c2 += (c1 & 1) << 16 # 17 bits wide
+ c3 += (c2 & 3) << 8 # 10 bits wide
+ parts.extend([_b32tab[c1 >> 11], # bits 1 - 5
+ _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
+ _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
+ _b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
+ _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
+ _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
+ _b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
+ _b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
+ ])
+ encoded = EMPTYSTRING.join(parts)
+ # Adjust for any leftover partial quanta
+ if leftover == 1:
+ return encoded[:-6] + '======'
+ elif leftover == 2:
+ return encoded[:-4] + '===='
+ elif leftover == 3:
+ return encoded[:-3] + '==='
+ elif leftover == 4:
+ return encoded[:-1] + '='
+ return encoded
+
+
+def b32decode(s, casefold=False, map01=None):
+ """Decode a Base32 encoded string.
+
+ s is the string to decode. Optional casefold is a flag specifying whether
+ a lowercase alphabet is acceptable as input. For security purposes, the
+ default is False.
+
+ RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
+ (oh), and for optional mapping of the digit 1 (one) to either the letter I
+ (eye) or letter L (el). The optional argument map01 when not None,
+ specifies which letter the digit 1 should be mapped to (when map01 is not
+ None, the digit 0 is always mapped to the letter O). For security
+ purposes the default is None, so that 0 and 1 are not allowed in the
+ input.
+
+    The decoded string is returned. A TypeError is raised if s is
+ incorrectly padded or if there are non-alphabet characters present in the
+ string.
+ """
+ quanta, leftover = divmod(len(s), 8)
+ if leftover:
+ raise TypeError('Incorrect padding')
+ # Handle section 2.4 zero and one mapping. The flag map01 will be either
+ # False, or the character to map the digit 1 (one) to. It should be
+ # either L (el) or I (eye).
+ if map01:
+ s = _translate(s, {'0': 'O', '1': map01})
+ if casefold:
+ s = s.upper()
+ # Strip off pad characters from the right. We need to count the pad
+ # characters because this will tell us how many null bytes to remove from
+ # the end of the decoded string.
+ padchars = 0
+ mo = re.search('(?P<pad>[=]*)$', s)
+ if mo:
+ padchars = len(mo.group('pad'))
+ if padchars > 0:
+ s = s[:-padchars]
+ # Now decode the full quanta
+ parts = []
+ acc = 0
+ shift = 35
+ for c in s:
+ val = _b32rev.get(c)
+ if val is None:
+ raise TypeError('Non-base32 digit found')
+        acc += val << shift
+ shift -= 5
+ if shift < 0:
+ parts.append(binascii.unhexlify('%010x' % acc))
+ acc = 0
+ shift = 35
+ # Process the last, partial quanta
+ last = binascii.unhexlify('%010x' % acc)
+ if padchars == 0:
+ last = '' # No characters
+ elif padchars == 1:
+ last = last[:-1]
+ elif padchars == 3:
+ last = last[:-2]
+ elif padchars == 4:
+ last = last[:-3]
+ elif padchars == 6:
+ last = last[:-4]
+ else:
+ raise TypeError('Incorrect padding')
+ parts.append(last)
+ return EMPTYSTRING.join(parts)
+
+
+
+# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
+# lowercase. The RFC also recommends against accepting input case
+# insensitively.
+def b16encode(s):
+ """Encode a string using Base16.
+
+ s is the string to encode. The encoded string is returned.
+ """
+ return binascii.hexlify(s).upper()
+
+
+def b16decode(s, casefold=False):
+ """Decode a Base16 encoded string.
+
+ s is the string to decode. Optional casefold is a flag specifying whether
+ a lowercase alphabet is acceptable as input. For security purposes, the
+ default is False.
+
+    The decoded string is returned. A TypeError is raised if s is
+ incorrectly padded or if there are non-alphabet characters present in the
+ string.
+ """
+ if casefold:
+ s = s.upper()
+ if re.search('[^0-9A-F]', s):
+ raise TypeError('Non-base16 digit found')
+ return binascii.unhexlify(s)
+
+
+
+# Legacy interface. This code could be cleaned up since I don't believe
+# binascii has any line length limitations. It just doesn't seem worth it
+# though.
+
+MAXLINESIZE = 76 # Excluding the CRLF
+MAXBINSIZE = (MAXLINESIZE//4)*3
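+# (76//4)*3 == 57, i.e. 57 bytes of raw input fill one 76-character line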
+
+def encode(input, output):
+ """Encode a file."""
+ while True:
+ s = input.read(MAXBINSIZE)
+ if not s:
+ break
+ while len(s) < MAXBINSIZE:
+ ns = input.read(MAXBINSIZE-len(s))
+ if not ns:
+ break
+ s += ns
+ line = binascii.b2a_base64(s)
+ output.write(line)
+
+
+def decode(input, output):
+ """Decode a file."""
+ while True:
+ line = input.readline()
+ if not line:
+ break
+ s = binascii.a2b_base64(line)
+ output.write(s)
+
+
+def encodestring(s):
+ """Encode a string."""
+ pieces = []
+ for i in range(0, len(s), MAXBINSIZE):
+ chunk = s[i : i + MAXBINSIZE]
+ pieces.append(binascii.b2a_base64(chunk))
+ return "".join(pieces)
+
+
+def decodestring(s):
+ """Decode a string."""
+ return binascii.a2b_base64(s)
+
+
+
+# Useable as a script...
+def test():
+ """Small test program"""
+ import sys, getopt
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'deut')
+ except getopt.error, msg:
+ sys.stdout = sys.stderr
+ print msg
+ print """usage: %s [-d|-e|-u|-t] [file|-]
+ -d, -u: decode
+ -e: encode (default)
+ -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
+ sys.exit(2)
+ func = encode
+ for o, a in opts:
+ if o == '-e': func = encode
+ if o == '-d': func = decode
+ if o == '-u': func = decode
+ if o == '-t': test1(); return
+ if args and args[0] != '-':
+ func(open(args[0], 'rb'), sys.stdout)
+ else:
+ func(sys.stdin, sys.stdout)
+
+
+def test1():
+ s0 = "Aladdin:open sesame"
+ s1 = encodestring(s0)
+ s2 = decodestring(s1)
+ print s0, repr(s1), s2
+
+
+if __name__ == '__main__':
+ test()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bdb.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,613 @@
+"""Debugger basics"""
+
+import sys
+import os
+import types
+
+__all__ = ["BdbQuit","Bdb","Breakpoint"]
+
+class BdbQuit(Exception):
+ """Exception to give up completely"""
+
+
+class Bdb:
+
+ """Generic Python debugger base class.
+
+ This class takes care of details of the trace facility;
+ a derived class should implement user interaction.
+ The standard debugger class (pdb.Pdb) is an example.
+ """
+
+ def __init__(self):
+ self.breaks = {}
+ self.fncache = {}
+
+ def canonic(self, filename):
+ if filename == "<" + filename[1:-1] + ">":
+ return filename
+ canonic = self.fncache.get(filename)
+ if not canonic:
+ canonic = os.path.abspath(filename)
+ canonic = os.path.normcase(canonic)
+ self.fncache[filename] = canonic
+ return canonic
+
+ def reset(self):
+ import linecache
+ linecache.checkcache()
+ self.botframe = None
+ self.stopframe = None
+ self.returnframe = None
+ self.quitting = 0
+
+ def trace_dispatch(self, frame, event, arg):
+ if self.quitting:
+ return # None
+ if event == 'line':
+ return self.dispatch_line(frame)
+ if event == 'call':
+ return self.dispatch_call(frame, arg)
+ if event == 'return':
+ return self.dispatch_return(frame, arg)
+ if event == 'exception':
+ return self.dispatch_exception(frame, arg)
+ if event == 'c_call':
+ return self.trace_dispatch
+ if event == 'c_exception':
+ return self.trace_dispatch
+ if event == 'c_return':
+ return self.trace_dispatch
+ print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
+ return self.trace_dispatch
+
+ def dispatch_line(self, frame):
+ if self.stop_here(frame) or self.break_here(frame):
+ self.user_line(frame)
+ if self.quitting: raise BdbQuit
+ return self.trace_dispatch
+
+ def dispatch_call(self, frame, arg):
+ # XXX 'arg' is no longer used
+ if self.botframe is None:
+ # First call of dispatch since reset()
+ self.botframe = frame.f_back # (CT) Note that this may also be None!
+ return self.trace_dispatch
+ if not (self.stop_here(frame) or self.break_anywhere(frame)):
+ # No need to trace this function
+ return # None
+ self.user_call(frame, arg)
+ if self.quitting: raise BdbQuit
+ return self.trace_dispatch
+
+ def dispatch_return(self, frame, arg):
+ if self.stop_here(frame) or frame == self.returnframe:
+ self.user_return(frame, arg)
+ if self.quitting: raise BdbQuit
+ return self.trace_dispatch
+
+ def dispatch_exception(self, frame, arg):
+ if self.stop_here(frame):
+ self.user_exception(frame, arg)
+ if self.quitting: raise BdbQuit
+ return self.trace_dispatch
+
+ # Normally derived classes don't override the following
+ # methods, but they may if they want to redefine the
+ # definition of stopping and breakpoints.
+
+ def stop_here(self, frame):
+ # (CT) stopframe may now also be None, see dispatch_call.
+ # (CT) the former test for None is therefore removed from here.
+ if frame is self.stopframe:
+ return True
+ while frame is not None and frame is not self.stopframe:
+ if frame is self.botframe:
+ return True
+ frame = frame.f_back
+ return False
+
+ def break_here(self, frame):
+ filename = self.canonic(frame.f_code.co_filename)
+ if not filename in self.breaks:
+ return False
+ lineno = frame.f_lineno
+ if not lineno in self.breaks[filename]:
+ # The line itself has no breakpoint, but maybe the line is the
+ # first line of a function with breakpoint set by function name.
+ lineno = frame.f_code.co_firstlineno
+ if not lineno in self.breaks[filename]:
+ return False
+
+ # flag says ok to delete temp. bp
+ (bp, flag) = effective(filename, lineno, frame)
+ if bp:
+ self.currentbp = bp.number
+ if (flag and bp.temporary):
+ self.do_clear(str(bp.number))
+ return True
+ else:
+ return False
+
+ def do_clear(self, arg):
+ raise NotImplementedError, "subclass of bdb must implement do_clear()"
+
+ def break_anywhere(self, frame):
+ return self.breaks.has_key(
+ self.canonic(frame.f_code.co_filename))
+
+ # Derived classes should override the user_* methods
+ # to gain control.
+
+ def user_call(self, frame, argument_list):
+ """This method is called when there is the remote possibility
+ that we ever need to stop in this function."""
+ pass
+
+ def user_line(self, frame):
+ """This method is called when we stop or break at this line."""
+ pass
+
+ def user_return(self, frame, return_value):
+ """This method is called when a return trap is set here."""
+ pass
+
+ def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
+ """This method is called if an exception occurs,
+ but only if we are to stop at or just below this level."""
+ pass
+
+ # Derived classes and clients can call the following methods
+ # to affect the stepping state.
+
+ def set_step(self):
+ """Stop after one line of code."""
+ self.stopframe = None
+ self.returnframe = None
+ self.quitting = 0
+
+ def set_next(self, frame):
+ """Stop on the next line in or below the given frame."""
+ self.stopframe = frame
+ self.returnframe = None
+ self.quitting = 0
+
+ def set_return(self, frame):
+ """Stop when returning from the given frame."""
+ self.stopframe = frame.f_back
+ self.returnframe = frame
+ self.quitting = 0
+
+ def set_trace(self, frame=None):
+ """Start debugging from `frame`.
+
+ If frame is not specified, debugging starts from caller's frame.
+ """
+ if frame is None:
+ frame = sys._getframe().f_back
+ self.reset()
+ while frame:
+ frame.f_trace = self.trace_dispatch
+ self.botframe = frame
+ frame = frame.f_back
+ self.set_step()
+ sys.settrace(self.trace_dispatch)
+
+ def set_continue(self):
+ # Don't stop except at breakpoints or when finished
+ self.stopframe = self.botframe
+ self.returnframe = None
+ self.quitting = 0
+ if not self.breaks:
+ # no breakpoints; run without debugger overhead
+ sys.settrace(None)
+ frame = sys._getframe().f_back
+ while frame and frame is not self.botframe:
+ del frame.f_trace
+ frame = frame.f_back
+
+ def set_quit(self):
+ self.stopframe = self.botframe
+ self.returnframe = None
+ self.quitting = 1
+ sys.settrace(None)
+
+ # Derived classes and clients can call the following methods
+ # to manipulate breakpoints.  These methods return an
+ # error message if something went wrong, None if all is well.
+ # set_break() verifies that the requested line exists before
+ # setting the breakpoint.  Call self.get_*break*() to see the
+ # breakpoints or, better:
+ # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
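+ #
+ # For example, where dbg is an instance of a Bdb subclass (both the
+ # instance and the file name are hypothetical):
+ #     err = dbg.set_break('spam.py', 12, temporary=1)
+ #     if err: print err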
+
+ def set_break(self, filename, lineno, temporary=0, cond = None,
+ funcname=None):
+ filename = self.canonic(filename)
+ import linecache # Import as late as possible
+ line = linecache.getline(filename, lineno)
+ if not line:
+ return 'Line %s:%d does not exist' % (filename,
+ lineno)
+ if not filename in self.breaks:
+ self.breaks[filename] = []
+ list = self.breaks[filename]
+ if not lineno in list:
+ list.append(lineno)
+ bp = Breakpoint(filename, lineno, temporary, cond, funcname)
+
+ def clear_break(self, filename, lineno):
+ filename = self.canonic(filename)
+ if not filename in self.breaks:
+ return 'There are no breakpoints in %s' % filename
+ if lineno not in self.breaks[filename]:
+ return 'There is no breakpoint at %s:%d' % (filename,
+ lineno)
+ # If there's only one bp in the list for that file,line
+ # pair, then remove the breaks entry
+ for bp in Breakpoint.bplist[filename, lineno][:]:
+ bp.deleteMe()
+ if not Breakpoint.bplist.has_key((filename, lineno)):
+ self.breaks[filename].remove(lineno)
+ if not self.breaks[filename]:
+ del self.breaks[filename]
+
+ def clear_bpbynumber(self, arg):
+ try:
+ number = int(arg)
+ except:
+ return 'Non-numeric breakpoint number (%s)' % arg
+ try:
+ bp = Breakpoint.bpbynumber[number]
+ except IndexError:
+ return 'Breakpoint number (%d) out of range' % number
+ if not bp:
+ return 'Breakpoint (%d) already deleted' % number
+ self.clear_break(bp.file, bp.line)
+
+ def clear_all_file_breaks(self, filename):
+ filename = self.canonic(filename)
+ if not filename in self.breaks:
+ return 'There are no breakpoints in %s' % filename
+ for line in self.breaks[filename]:
+ blist = Breakpoint.bplist[filename, line]
+ for bp in blist:
+ bp.deleteMe()
+ del self.breaks[filename]
+
+ def clear_all_breaks(self):
+ if not self.breaks:
+ return 'There are no breakpoints'
+ for bp in Breakpoint.bpbynumber:
+ if bp:
+ bp.deleteMe()
+ self.breaks = {}
+
+ def get_break(self, filename, lineno):
+ filename = self.canonic(filename)
+ return filename in self.breaks and \
+ lineno in self.breaks[filename]
+
+ def get_breaks(self, filename, lineno):
+ filename = self.canonic(filename)
+ return filename in self.breaks and \
+ lineno in self.breaks[filename] and \
+ Breakpoint.bplist[filename, lineno] or []
+
+ def get_file_breaks(self, filename):
+ filename = self.canonic(filename)
+ if filename in self.breaks:
+ return self.breaks[filename]
+ else:
+ return []
+
+ def get_all_breaks(self):
+ return self.breaks
+
+ # Derived classes and clients can call the following method
+ # to get a data structure representing a stack trace.
+
+ def get_stack(self, f, t):
+ stack = []
+ if t and t.tb_frame is f:
+ t = t.tb_next
+ while f is not None:
+ stack.append((f, f.f_lineno))
+ if f is self.botframe:
+ break
+ f = f.f_back
+ stack.reverse()
+ i = max(0, len(stack) - 1)
+ while t is not None:
+ stack.append((t.tb_frame, t.tb_lineno))
+ t = t.tb_next
+ return stack, i
+
+ #
+
+ def format_stack_entry(self, frame_lineno, lprefix=': '):
+ import linecache, repr
+ frame, lineno = frame_lineno
+ filename = self.canonic(frame.f_code.co_filename)
+ s = '%s(%r)' % (filename, lineno)
+ if frame.f_code.co_name:
+ s = s + frame.f_code.co_name
+ else:
+ s = s + "<lambda>"
+ if '__args__' in frame.f_locals:
+ args = frame.f_locals['__args__']
+ else:
+ args = None
+ if args:
+ s = s + repr.repr(args)
+ else:
+ s = s + '()'
+ if '__return__' in frame.f_locals:
+ rv = frame.f_locals['__return__']
+ s = s + '->'
+ s = s + repr.repr(rv)
+ line = linecache.getline(filename, lineno)
+ if line: s = s + lprefix + line.strip()
+ return s
+
+ # The following two methods can be called by clients to use
+ # a debugger to debug a statement, given as a string.
+
+ def run(self, cmd, globals=None, locals=None):
+ if globals is None:
+ import __main__
+ globals = __main__.__dict__
+ if locals is None:
+ locals = globals
+ self.reset()
+ sys.settrace(self.trace_dispatch)
+ if not isinstance(cmd, types.CodeType):
+ cmd = cmd+'\n'
+ try:
+ try:
+ exec cmd in globals, locals
+ except BdbQuit:
+ pass
+ finally:
+ self.quitting = 1
+ sys.settrace(None)
+
+ def runeval(self, expr, globals=None, locals=None):
+ if globals is None:
+ import __main__
+ globals = __main__.__dict__
+ if locals is None:
+ locals = globals
+ self.reset()
+ sys.settrace(self.trace_dispatch)
+ if not isinstance(expr, types.CodeType):
+ expr = expr+'\n'
+ try:
+ try:
+ return eval(expr, globals, locals)
+ except BdbQuit:
+ pass
+ finally:
+ self.quitting = 1
+ sys.settrace(None)
+
+ def runctx(self, cmd, globals, locals):
+ # B/W compatibility
+ self.run(cmd, globals, locals)
+
+ # This method is more useful to debug a single function call.
+
+ def runcall(self, func, *args, **kwds):
+ self.reset()
+ sys.settrace(self.trace_dispatch)
+ res = None
+ try:
+ try:
+ res = func(*args, **kwds)
+ except BdbQuit:
+ pass
+ finally:
+ self.quitting = 1
+ sys.settrace(None)
+ return res
+
+
+def set_trace():
+ Bdb().set_trace()
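+
+# A minimal illustrative sketch of using Bdb by subclassing (the Tdb
+# class in the testing section below is a fuller example):
+#
+#     class EchoDebugger(Bdb):
+#         def user_line(self, frame):
+#             print 'at', frame.f_code.co_filename, frame.f_lineno
+#             self.set_continue()    # run on to the next breakpoint, if any
+#
+#     EchoDebugger().run('x = 1 + 1')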
+
+
+class Breakpoint:
+
+ """Breakpoint class
+
+ Implements temporary breakpoints, ignore counts, disabling and
+ (re)-enabling, and conditionals.
+
+ Breakpoints are indexed by number through bpbynumber and by
+ the file,line tuple using bplist. The former points to a
+ single instance of class Breakpoint. The latter points to a
+ list of such instances since there may be more than one
+ breakpoint per line.
+
+ """
+
+ # XXX Keeping state in the class is a mistake -- this means
+ # you cannot have more than one active Bdb instance.
+
+ next = 1 # Next bp to be assigned
+ bplist = {} # indexed by (file, lineno) tuple
+ bpbynumber = [None] # Each entry is None or an instance of Breakpoint
+ # index 0 is unused, except for marking an
+ # effective break .... see effective()
+
+ def __init__(self, file, line, temporary=0, cond=None, funcname=None):
+ self.funcname = funcname
+ # Needed if funcname is not None.
+ self.func_first_executable_line = None
+ self.file = file # This better be in canonical form!
+ self.line = line
+ self.temporary = temporary
+ self.cond = cond
+ self.enabled = 1
+ self.ignore = 0
+ self.hits = 0
+ self.number = Breakpoint.next
+ Breakpoint.next = Breakpoint.next + 1
+ # Build the two lists
+ self.bpbynumber.append(self)
+ if self.bplist.has_key((file, line)):
+ self.bplist[file, line].append(self)
+ else:
+ self.bplist[file, line] = [self]
+
+
+ def deleteMe(self):
+ index = (self.file, self.line)
+ self.bpbynumber[self.number] = None # No longer in list
+ self.bplist[index].remove(self)
+ if not self.bplist[index]:
+ # No more bp for this f:l combo
+ del self.bplist[index]
+
+ def enable(self):
+ self.enabled = 1
+
+ def disable(self):
+ self.enabled = 0
+
+ def bpprint(self, out=None):
+ if out is None:
+ out = sys.stdout
+ if self.temporary:
+ disp = 'del '
+ else:
+ disp = 'keep '
+ if self.enabled:
+ disp = disp + 'yes '
+ else:
+ disp = disp + 'no '
+ print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
+ self.file, self.line)
+ if self.cond:
+ print >>out, '\tstop only if %s' % (self.cond,)
+ if self.ignore:
+ print >>out, '\tignore next %d hits' % (self.ignore)
+ if (self.hits):
+ if (self.hits > 1): ss = 's'
+ else: ss = ''
+ print >>out, ('\tbreakpoint already hit %d time%s' %
+ (self.hits, ss))
+
+# -----------end of Breakpoint class----------
+
+def checkfuncname(b, frame):
+ """Check whether we should break here because of `b.funcname`."""
+ if not b.funcname:
+ # Breakpoint was set via line number.
+ if b.line != frame.f_lineno:
+ # Breakpoint was set at a line with a def statement and the function
+ # defined is called: don't break.
+ return False
+ return True
+
+ # Breakpoint set via function name.
+
+ if frame.f_code.co_name != b.funcname:
+ # It's not a function call, but rather execution of def statement.
+ return False
+
+ # We are in the right frame.
+ if not b.func_first_executable_line:
+ # The function is entered for the 1st time.
+ b.func_first_executable_line = frame.f_lineno
+
+ if b.func_first_executable_line != frame.f_lineno:
+ # But we are not at the first line number: don't break.
+ return False
+ return True
+
+# Determine the breakpoint (if any) in effect at this line of code.
+# Returns a (breakpoint, flag) pair, or (None, None) if none applies.
+def effective(file, line, frame):
+ """Determine which breakpoint for this file:line is to be acted upon.
+
+ Called only if we know there is a bpt at this
+ location. Returns breakpoint that was triggered and a flag
+ that indicates if it is ok to delete a temporary bp.
+
+ """
+ possibles = Breakpoint.bplist[file,line]
+ for i in range(0, len(possibles)):
+ b = possibles[i]
+ if b.enabled == 0:
+ continue
+ if not checkfuncname(b, frame):
+ continue
+ # Count every hit when bp is enabled
+ b.hits = b.hits + 1
+ if not b.cond:
+ # If unconditional, and ignoring,
+ # go on to next, else break
+ if b.ignore > 0:
+ b.ignore = b.ignore -1
+ continue
+ else:
+ # breakpoint and marker that's ok
+ # to delete if temporary
+ return (b,1)
+ else:
+ # Conditional bp.
+ # Ignore count applies only to those bpt hits where the
+ # condition evaluates to true.
+ try:
+ val = eval(b.cond, frame.f_globals,
+ frame.f_locals)
+ if val:
+ if b.ignore > 0:
+ b.ignore = b.ignore -1
+ # continue
+ else:
+ return (b,1)
+ # else:
+ # continue
+ except:
+ # if eval fails, most conservative
+ # thing is to stop on breakpoint
+ # regardless of ignore count.
+ # Don't delete temporary,
+ # as another hint to user.
+ return (b,0)
+ return (None, None)
+
+# -------------------- testing --------------------
+
+class Tdb(Bdb):
+ def user_call(self, frame, args):
+ name = frame.f_code.co_name
+ if not name: name = '???'
+ print '+++ call', name, args
+ def user_line(self, frame):
+ import linecache
+ name = frame.f_code.co_name
+ if not name: name = '???'
+ fn = self.canonic(frame.f_code.co_filename)
+ line = linecache.getline(fn, frame.f_lineno)
+ print '+++', fn, frame.f_lineno, name, ':', line.strip()
+ def user_return(self, frame, retval):
+ print '+++ return', retval
+ def user_exception(self, frame, exc_stuff):
+ print '+++ exception', exc_stuff
+ self.set_continue()
+
+def foo(n):
+ print 'foo(', n, ')'
+ x = bar(n*10)
+ print 'bar returned', x
+
+def bar(a):
+ print 'bar(', a, ')'
+ return a/2
+
+def test():
+ t = Tdb()
+ t.run('import bdb; bdb.foo(10)')
+
+# end
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/binhex.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,527 @@
+"""Macintosh binhex compression/decompression.
+
+easy interface:
+binhex(inputfilename, outputfilename)
+hexbin(inputfilename, outputfilename)
+"""
+
+#
+# Jack Jansen, CWI, August 1995.
+#
+# The module is supposed to be as compatible as possible. Especially the
+# easy interface should work "as expected" on any platform.
+# XXXX Note: currently, textfiles appear in mac-form on all platforms.
+# We seem to lack a simple character-translate in python.
+# (we should probably use ISO-Latin-1 on all but the mac platform).
+# XXXX The simple routines are too simple: they expect to hold the complete
+# files in-core. Should be fixed.
+# XXXX It would be nice to handle AppleDouble format on unix
+# (for servers serving macs).
+# XXXX I don't understand what happens when you get 0x90 times the same byte on
+# input. The resulting code (xx 90 90) would appear to be interpreted as an
+# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
+#
+import sys
+import os
+import struct
+import binascii
+
+__all__ = ["binhex","hexbin","Error"]
+
+class Error(Exception):
+ pass
+
+# States (what have we written)
+[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
+
+# Various constants
+REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
+LINELEN=64
+RUNCHAR=chr(0x90) # run-length introducer
+
+#
+# This code is no longer byte-order dependent
+
+#
+# Workarounds for non-mac machines.
+try:
+ from Carbon.File import FSSpec, FInfo
+ from MacOS import openrf
+
+ def getfileinfo(name):
+ finfo = FSSpec(name).FSpGetFInfo()
+ dir, file = os.path.split(name)
+ # XXX Get resource/data sizes
+ fp = open(name, 'rb')
+ fp.seek(0, 2)
+ dlen = fp.tell()
+ fp = openrf(name, '*rb')
+ fp.seek(0, 2)
+ rlen = fp.tell()
+ return file, finfo, dlen, rlen
+
+ def openrsrc(name, *mode):
+ if not mode:
+ mode = '*rb'
+ else:
+ mode = '*' + mode[0]
+ return openrf(name, mode)
+
+except ImportError:
+ #
+ # Glue code for non-macintosh usage
+ #
+
+ class FInfo:
+ def __init__(self):
+ self.Type = '????'
+ self.Creator = '????'
+ self.Flags = 0
+
+ def getfileinfo(name):
+ finfo = FInfo()
+ # Quick check for textfile
+ fp = open(name)
+ data = fp.read(256)
+ for c in data:
+ if not c.isspace() and (c<' ' or ord(c) > 0x7f):
+ break
+ else:
+ finfo.Type = 'TEXT'
+ fp.seek(0, 2)
+ dsize = fp.tell()
+ fp.close()
+ dir, file = os.path.split(name)
+ file = file.replace(':', '-', 1)
+ return file, finfo, dsize, 0
+
+ class openrsrc:
+ def __init__(self, *args):
+ pass
+
+ def read(self, *args):
+ return ''
+
+ def write(self, *args):
+ pass
+
+ def close(self):
+ pass
+
+class _Hqxcoderengine:
+ """Write data to the coder in 3-byte chunks"""
+
+ def __init__(self, ofp):
+ self.ofp = ofp
+ self.data = ''
+ self.hqxdata = ''
+ self.linelen = LINELEN-1
+
+ def write(self, data):
+ self.data = self.data + data
+ datalen = len(self.data)
+ todo = (datalen//3)*3
+ data = self.data[:todo]
+ self.data = self.data[todo:]
+ if not data:
+ return
+ self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
+ self._flush(0)
+
+ def _flush(self, force):
+ first = 0
+ while first <= len(self.hqxdata)-self.linelen:
+ last = first + self.linelen
+ self.ofp.write(self.hqxdata[first:last]+'\n')
+ self.linelen = LINELEN
+ first = last
+ self.hqxdata = self.hqxdata[first:]
+ if force:
+ self.ofp.write(self.hqxdata + ':\n')
+
+ def close(self):
+ if self.data:
+ self.hqxdata = \
+ self.hqxdata + binascii.b2a_hqx(self.data)
+ self._flush(1)
+ self.ofp.close()
+ del self.ofp
+
+class _Rlecoderengine:
+ """Write data to the RLE-coder in suitably large chunks"""
+
+ def __init__(self, ofp):
+ self.ofp = ofp
+ self.data = ''
+
+ def write(self, data):
+ self.data = self.data + data
+ if len(self.data) < REASONABLY_LARGE:
+ return
+ rledata = binascii.rlecode_hqx(self.data)
+ self.ofp.write(rledata)
+ self.data = ''
+
+ def close(self):
+ if self.data:
+ rledata = binascii.rlecode_hqx(self.data)
+ self.ofp.write(rledata)
+ self.ofp.close()
+ del self.ofp
+
+class BinHex:
+ def __init__(self, (name, finfo, dlen, rlen), ofp):
+ if type(ofp) == type(''):
+ ofname = ofp
+ ofp = open(ofname, 'w')
+ if os.name == 'mac':
+ fss = FSSpec(ofname)
+ fss.SetCreatorType('BnHq', 'TEXT')
+ ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
+ hqxer = _Hqxcoderengine(ofp)
+ self.ofp = _Rlecoderengine(hqxer)
+ self.crc = 0
+ if finfo is None:
+ finfo = FInfo()
+ self.dlen = dlen
+ self.rlen = rlen
+ self._writeinfo(name, finfo)
+ self.state = _DID_HEADER
+
+ def _writeinfo(self, name, finfo):
+ nl = len(name)
+ if nl > 63:
+ raise Error, 'Filename too long'
+ d = chr(nl) + name + '\0'
+ d2 = finfo.Type + finfo.Creator
+
+ # Force all structs to be packed with big-endian
+ d3 = struct.pack('>h', finfo.Flags)
+ d4 = struct.pack('>ii', self.dlen, self.rlen)
+ info = d + d2 + d3 + d4
+ self._write(info)
+ self._writecrc()
+
+ def _write(self, data):
+ self.crc = binascii.crc_hqx(data, self.crc)
+ self.ofp.write(data)
+
+ def _writecrc(self):
+ # XXXX Should this be here??
+ # self.crc = binascii.crc_hqx('\0\0', self.crc)
+ if self.crc < 0:
+ fmt = '>h'
+ else:
+ fmt = '>H'
+ self.ofp.write(struct.pack(fmt, self.crc))
+ self.crc = 0
+
+ def write(self, data):
+ if self.state != _DID_HEADER:
+ raise Error, 'Writing data at the wrong time'
+ self.dlen = self.dlen - len(data)
+ self._write(data)
+
+ def close_data(self):
+ if self.dlen != 0:
+ raise Error, 'Incorrect data size, diff=%r' % (self.dlen,)
+ self._writecrc()
+ self.state = _DID_DATA
+
+ def write_rsrc(self, data):
+ if self.state < _DID_DATA:
+ self.close_data()
+ if self.state != _DID_DATA:
+ raise Error, 'Writing resource data at the wrong time'
+ self.rlen = self.rlen - len(data)
+ self._write(data)
+
+ def close(self):
+ if self.state < _DID_DATA:
+ self.close_data()
+ if self.state != _DID_DATA:
+ raise Error, 'Close at the wrong time'
+ if self.rlen != 0:
+ raise Error, \
+ "Incorrect resource-datasize, diff=%r" % (self.rlen,)
+ self._writecrc()
+ self.ofp.close()
+ self.state = None
+ del self.ofp
+
+def binhex(inp, out):
+ """(infilename, outfilename) - Create binhex-encoded copy of a file"""
+ finfo = getfileinfo(inp)
+ ofp = BinHex(finfo, out)
+
+ ifp = open(inp, 'rb')
+ # XXXX Do textfile translation on non-mac systems
+ while 1:
+ d = ifp.read(128000)
+ if not d: break
+ ofp.write(d)
+ ofp.close_data()
+ ifp.close()
+
+ ifp = openrsrc(inp, 'rb')
+ while 1:
+ d = ifp.read(128000)
+ if not d: break
+ ofp.write_rsrc(d)
+ ofp.close()
+ ifp.close()
+
+class _Hqxdecoderengine:
+ """Read data via the decoder in 4-byte chunks"""
+
+ def __init__(self, ifp):
+ self.ifp = ifp
+ self.eof = 0
+
+ def read(self, totalwtd):
+ """Read at least wtd bytes (or until EOF)"""
+ decdata = ''
+ wtd = totalwtd
+ #
+ # The loop here is convoluted, since we don't really know how
+ # much to decode: there may be newlines in the incoming data.
+ while wtd > 0:
+ if self.eof: return decdata
+ wtd = ((wtd+2)//3)*4
+ data = self.ifp.read(wtd)
+ #
+ # Next problem: there may not be a complete number of
+ # bytes in what we pass to a2b. Solve by yet another
+ # loop.
+ #
+ while 1:
+ try:
+ decdatacur, self.eof = \
+ binascii.a2b_hqx(data)
+ break
+ except binascii.Incomplete:
+ pass
+ newdata = self.ifp.read(1)
+ if not newdata:
+ raise Error, \
+ 'Premature EOF on binhex file'
+ data = data + newdata
+ decdata = decdata + decdatacur
+ wtd = totalwtd - len(decdata)
+ if not decdata and not self.eof:
+ raise Error, 'Premature EOF on binhex file'
+ return decdata
+
+ def close(self):
+ self.ifp.close()
+
+class _Rledecoderengine:
+ """Read data via the RLE-coder"""
+
+ def __init__(self, ifp):
+ self.ifp = ifp
+ self.pre_buffer = ''
+ self.post_buffer = ''
+ self.eof = 0
+
+ def read(self, wtd):
+ if wtd > len(self.post_buffer):
+ self._fill(wtd-len(self.post_buffer))
+ rv = self.post_buffer[:wtd]
+ self.post_buffer = self.post_buffer[wtd:]
+ return rv
+
+ def _fill(self, wtd):
+ self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
+ if self.ifp.eof:
+ self.post_buffer = self.post_buffer + \
+ binascii.rledecode_hqx(self.pre_buffer)
+ self.pre_buffer = ''
+ return
+
+ #
+ # Obfuscated code ahead. We have to take care that we don't
+ # end up with an orphaned RUNCHAR later on. So, we keep a couple
+ # of bytes in the buffer, depending on what the end of
+ # the buffer looks like:
+ # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
+ # '?\220' - Keep 2 bytes: repeated something-else
+ # '\220\0' - Escaped \220: Keep 2 bytes.
+ # '?\220?' - Complete repeat sequence: decode all
+ # otherwise: keep 1 byte.
+ #
+ mark = len(self.pre_buffer)
+ if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
+ mark = mark - 3
+ elif self.pre_buffer[-1] == RUNCHAR:
+ mark = mark - 2
+ elif self.pre_buffer[-2:] == RUNCHAR + '\0':
+ mark = mark - 2
+ elif self.pre_buffer[-2] == RUNCHAR:
+ pass # Decode all
+ else:
+ mark = mark - 1
+
+ self.post_buffer = self.post_buffer + \
+ binascii.rledecode_hqx(self.pre_buffer[:mark])
+ self.pre_buffer = self.pre_buffer[mark:]
+
+ def close(self):
+ self.ifp.close()
+
+class HexBin:
+ def __init__(self, ifp):
+ if type(ifp) == type(''):
+ ifp = open(ifp)
+ #
+ # Find initial colon.
+ #
+ while 1:
+ ch = ifp.read(1)
+ if not ch:
+ raise Error, "No binhex data found"
+ # Cater for \r\n terminated lines (which show up as \n\r, hence
+ # all lines start with \r)
+ if ch == '\r':
+ continue
+ if ch == ':':
+ break
+ if ch != '\n':
+ dummy = ifp.readline()
+
+ hqxifp = _Hqxdecoderengine(ifp)
+ self.ifp = _Rledecoderengine(hqxifp)
+ self.crc = 0
+ self._readheader()
+
+ def _read(self, len):
+ data = self.ifp.read(len)
+ self.crc = binascii.crc_hqx(data, self.crc)
+ return data
+
+ def _checkcrc(self):
+ filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
+ #self.crc = binascii.crc_hqx('\0\0', self.crc)
+ # XXXX Is this needed??
+ self.crc = self.crc & 0xffff
+ if filecrc != self.crc:
+ raise Error, 'CRC error, computed %x, read %x' \
+ %(self.crc, filecrc)
+ self.crc = 0
+
+ def _readheader(self):
+ len = self._read(1)
+ fname = self._read(ord(len))
+ rest = self._read(1+4+4+2+4+4)
+ self._checkcrc()
+
+ type = rest[1:5]
+ creator = rest[5:9]
+ flags = struct.unpack('>h', rest[9:11])[0]
+ self.dlen = struct.unpack('>l', rest[11:15])[0]
+ self.rlen = struct.unpack('>l', rest[15:19])[0]
+
+ self.FName = fname
+ self.FInfo = FInfo()
+ self.FInfo.Creator = creator
+ self.FInfo.Type = type
+ self.FInfo.Flags = flags
+
+ self.state = _DID_HEADER
+
+ def read(self, *n):
+ if self.state != _DID_HEADER:
+ raise Error, 'Read data at wrong time'
+ if n:
+ n = n[0]
+ n = min(n, self.dlen)
+ else:
+ n = self.dlen
+ rv = ''
+ while len(rv) < n:
+ rv = rv + self._read(n-len(rv))
+ self.dlen = self.dlen - n
+ return rv
+
+ def close_data(self):
+ if self.state != _DID_HEADER:
+ raise Error, 'close_data at wrong time'
+ if self.dlen:
+ dummy = self._read(self.dlen)
+ self._checkcrc()
+ self.state = _DID_DATA
+
+ def read_rsrc(self, *n):
+ if self.state == _DID_HEADER:
+ self.close_data()
+ if self.state != _DID_DATA:
+ raise Error, 'Read resource data at wrong time'
+ if n:
+ n = n[0]
+ n = min(n, self.rlen)
+ else:
+ n = self.rlen
+ self.rlen = self.rlen - n
+ return self._read(n)
+
+ def close(self):
+ if self.rlen:
+ dummy = self.read_rsrc(self.rlen)
+ self._checkcrc()
+ self.state = _DID_RSRC
+ self.ifp.close()
+
+def hexbin(inp, out):
+ """(infilename, outfilename) - Decode binhexed file"""
+ ifp = HexBin(inp)
+ finfo = ifp.FInfo
+ if not out:
+ out = ifp.FName
+ if os.name == 'mac':
+ ofss = FSSpec(out)
+ out = ofss.as_pathname()
+
+ ofp = open(out, 'wb')
+ # XXXX Do translation on non-mac systems
+ while 1:
+ d = ifp.read(128000)
+ if not d: break
+ ofp.write(d)
+ ofp.close()
+ ifp.close_data()
+
+ d = ifp.read_rsrc(128000)
+ if d:
+ ofp = openrsrc(out, 'wb')
+ ofp.write(d)
+ while 1:
+ d = ifp.read_rsrc(128000)
+ if not d: break
+ ofp.write(d)
+ ofp.close()
+
+ if os.name == 'mac':
+ nfinfo = ofss.GetFInfo()
+ nfinfo.Creator = finfo.Creator
+ nfinfo.Type = finfo.Type
+ nfinfo.Flags = finfo.Flags
+ ofss.SetFInfo(nfinfo)
+
+ ifp.close()
+
+def _test():
+ if os.name == 'mac':
+ import macfs
+ fss, ok = macfs.PromptGetFile('File to convert:')
+ if not ok:
+ sys.exit(0)
+ fname = fss.as_pathname()
+ else:
+ fname = sys.argv[1]
+ binhex(fname, fname+'.hqx')
+ hexbin(fname+'.hqx', fname+'.viahqx')
+ #hexbin(fname, fname+'.unpacked')
+ sys.exit(1)
+
+if __name__ == '__main__':
+ _test()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bisect.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,84 @@
+"""Bisection algorithms."""
+
+def insort_right(a, x, lo=0, hi=None):
+ """Insert item x in list a, and keep it sorted assuming a is sorted.
+
+ If x is already in a, insert it to the right of the rightmost x.
+
+ Optional args lo (default 0) and hi (default len(a)) bound the
+ slice of a to be searched.
+ """
+
+ if hi is None:
+ hi = len(a)
+ while lo < hi:
+ mid = (lo+hi)//2
+ if x < a[mid]: hi = mid
+ else: lo = mid+1
+ a.insert(lo, x)
+
+insort = insort_right # backward compatibility
+
+def bisect_right(a, x, lo=0, hi=None):
+ """Return the index where to insert item x in list a, assuming a is sorted.
+
+ The return value i is such that all e in a[:i] have e <= x, and all e in
+ a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
+ insert just after the rightmost x already there.
+
+ Optional args lo (default 0) and hi (default len(a)) bound the
+ slice of a to be searched.
+ """
+
+ if hi is None:
+ hi = len(a)
+ while lo < hi:
+ mid = (lo+hi)//2
+ if x < a[mid]: hi = mid
+ else: lo = mid+1
+ return lo
+
+bisect = bisect_right # backward compatibility
+
+def insort_left(a, x, lo=0, hi=None):
+ """Insert item x in list a, and keep it sorted assuming a is sorted.
+
+ If x is already in a, insert it to the left of the leftmost x.
+
+ Optional args lo (default 0) and hi (default len(a)) bound the
+ slice of a to be searched.
+ """
+
+ if hi is None:
+ hi = len(a)
+ while lo < hi:
+ mid = (lo+hi)//2
+ if a[mid] < x: lo = mid+1
+ else: hi = mid
+ a.insert(lo, x)
+
+
+def bisect_left(a, x, lo=0, hi=None):
+ """Return the index where to insert item x in list a, assuming a is sorted.
+
+ The return value i is such that all e in a[:i] have e < x, and all e in
+ a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
+ insert just before the leftmost x already there.
+
+ Optional args lo (default 0) and hi (default len(a)) bound the
+ slice of a to be searched.
+ """
+
+ if hi is None:
+ hi = len(a)
+ while lo < hi:
+ mid = (lo+hi)//2
+ if a[mid] < x: lo = mid+1
+ else: hi = mid
+ return lo
+
+# Overwrite above definitions with a fast C implementation
+try:
+ from _bisect import bisect_right, bisect_left, insort_left, insort_right, insort, bisect
+except ImportError:
+ pass
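+
+# A short illustrative example of the module's behaviour:
+#
+#     from bisect import bisect_left, bisect_right, insort
+#     a = [1, 2, 4, 4, 8]
+#     bisect_right(a, 4)   # -> 4, just past the rightmost 4
+#     bisect_left(a, 4)    # -> 2, just before the leftmost 4
+#     insort(a, 3)         # a becomes [1, 2, 3, 4, 4, 8]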
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bsddb/__init__.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,397 @@
+#----------------------------------------------------------------------
+# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
+# and Andrew Kuchling. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# o Redistributions of source code must retain the above copyright
+# notice, this list of conditions, and the disclaimer that follows.
+#
+# o Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions, and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# o Neither the name of Digital Creations nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
+# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
+# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#----------------------------------------------------------------------
+
+
+"""Support for BerkeleyDB 3.3 through 4.4 with a simple interface.
+
+For the full featured object oriented interface use the bsddb.db module
+instead. It mirrors the Sleepycat BerkeleyDB C API.
+"""
+
+try:
+ if __name__ == 'bsddb3':
+ # import the _pybsddb binary, as it should be a more recent version
+ # from the standalone pybsddb addon package than the version included
+ # with python as bsddb._bsddb.
+ import _pybsddb
+ _bsddb = _pybsddb
+ from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
+ else:
+ import _bsddb
+ from bsddb.dbutils import DeadlockWrap as _DeadlockWrap
+except ImportError:
+ # Remove ourselves from sys.modules
+ import sys
+ del sys.modules[__name__]
+ raise
+
+# bsddb3 calls it db, but provide _db for backwards compatibility
+db = _db = _bsddb
+__version__ = db.__version__
+
+error = db.DBError # So bsddb.error will mean something...
+
+#----------------------------------------------------------------------
+
+import sys, os
+
+# for backwards compatibility with python versions older than 2.3, the
+# iterator interface is dynamically defined and added using a mixin
+# class. old python can't tokenize it due to the yield keyword.
+if sys.version >= '2.3':
+ import UserDict
+ from weakref import ref
+ exec """
+class _iter_mixin(UserDict.DictMixin):
+ def _make_iter_cursor(self):
+ cur = _DeadlockWrap(self.db.cursor)
+ key = id(cur)
+ self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
+ return cur
+
+ def _gen_cref_cleaner(self, key):
+ # we generate the function for the weakref callback here
+ # to ensure that we do not hold a strong reference to cur
+ # in the callback.
+ return lambda ref: self._cursor_refs.pop(key, None)
+
+ def __iter__(self):
+ try:
+ cur = self._make_iter_cursor()
+
+ # FIXME-20031102-greg: race condition. cursor could
+ # be closed by another thread before this call.
+
+ # since we're only returning keys, we call the cursor
+ # methods with flags=0, dlen=0, dofs=0
+ key = _DeadlockWrap(cur.first, 0,0,0)[0]
+ yield key
+
+ next = cur.next
+ while 1:
+ try:
+ key = _DeadlockWrap(next, 0,0,0)[0]
+ yield key
+ except _bsddb.DBCursorClosedError:
+ cur = self._make_iter_cursor()
+ # FIXME-20031101-greg: race condition. cursor could
+ # be closed by another thread before this call.
+ _DeadlockWrap(cur.set, key,0,0,0)
+ next = cur.next
+ except _bsddb.DBNotFoundError:
+ return
+ except _bsddb.DBCursorClosedError:
+ # the database was modified during iteration. abort.
+ return
+
+ def iteritems(self):
+ if not self.db:
+ return
+ try:
+ cur = self._make_iter_cursor()
+
+ # FIXME-20031102-greg: race condition. cursor could
+ # be closed by another thread before this call.
+
+ kv = _DeadlockWrap(cur.first)
+ key = kv[0]
+ yield kv
+
+ next = cur.next
+ while 1:
+ try:
+ kv = _DeadlockWrap(next)
+ key = kv[0]
+ yield kv
+ except _bsddb.DBCursorClosedError:
+ cur = self._make_iter_cursor()
+ # FIXME-20031101-greg: race condition. cursor could
+ # be closed by another thread before this call.
+ _DeadlockWrap(cur.set, key,0,0,0)
+ next = cur.next
+ except _bsddb.DBNotFoundError:
+ return
+ except _bsddb.DBCursorClosedError:
+ # the database was modified during iteration. abort.
+ return
+"""
+else:
+ class _iter_mixin: pass
+
+
+class _DBWithCursor(_iter_mixin):
+ """
+ A simple wrapper around DB that makes it look like the bsddbobject in
+ the old module. It uses a cursor as needed to provide DB traversal.
+ """
+ def __init__(self, db):
+ self.db = db
+ self.db.set_get_returns_none(0)
+
+ # FIXME-20031101-greg: I believe there is still the potential
+ # for deadlocks in a multithreaded environment if someone
+ # attempts to use any of the cursor interfaces in one
+ # thread while doing a put or delete in another thread. The
+ # reason is that _checkCursor and _closeCursors are not atomic
+ # operations. Doing our own locking around self.dbc,
+ # self.saved_dbc_key and self._cursor_refs could prevent this.
+ # TODO: A test case demonstrating the problem needs to be written.
+
+ # self.dbc is a DBCursor object used to implement the
+ # first/next/previous/last/set_location methods.
+ self.dbc = None
+ self.saved_dbc_key = None
+
+ # a collection of all DBCursor objects currently allocated
+ # by the _iter_mixin interface.
+ self._cursor_refs = {}
+
+ def __del__(self):
+ self.close()
+
+ def _checkCursor(self):
+ if self.dbc is None:
+ self.dbc = _DeadlockWrap(self.db.cursor)
+ if self.saved_dbc_key is not None:
+ _DeadlockWrap(self.dbc.set, self.saved_dbc_key)
+ self.saved_dbc_key = None
+
+ # This method is needed for all non-cursor DB calls to avoid
+ # BerkeleyDB deadlocks (due to being opened with DB_INIT_LOCK
+ # and DB_THREAD to be thread safe) when intermixing database
+ # operations that use the cursor internally with those that don't.
+ def _closeCursors(self, save=1):
+ if self.dbc:
+ c = self.dbc
+ self.dbc = None
+ if save:
+ try:
+ self.saved_dbc_key = _DeadlockWrap(c.current, 0,0,0)[0]
+ except db.DBError:
+ pass
+ _DeadlockWrap(c.close)
+ del c
+ for cref in self._cursor_refs.values():
+ c = cref()
+ if c is not None:
+ _DeadlockWrap(c.close)
+
+ def _checkOpen(self):
+ if self.db is None:
+ raise error, "BSDDB object has already been closed"
+
+ def isOpen(self):
+ return self.db is not None
+
+ def __len__(self):
+ self._checkOpen()
+ return _DeadlockWrap(lambda: len(self.db)) # len(self.db)
+
+ def __getitem__(self, key):
+ self._checkOpen()
+ return _DeadlockWrap(lambda: self.db[key]) # self.db[key]
+
+ def __setitem__(self, key, value):
+ self._checkOpen()
+ self._closeCursors()
+ def wrapF():
+ self.db[key] = value
+ _DeadlockWrap(wrapF) # self.db[key] = value
+
+ def __delitem__(self, key):
+ self._checkOpen()
+ self._closeCursors()
+ def wrapF():
+ del self.db[key]
+ _DeadlockWrap(wrapF) # del self.db[key]
+
+ def close(self):
+ self._closeCursors(save=0)
+ if self.dbc is not None:
+ _DeadlockWrap(self.dbc.close)
+ v = 0
+ if self.db is not None:
+ v = _DeadlockWrap(self.db.close)
+ self.dbc = None
+ self.db = None
+ return v
+
+ def keys(self):
+ self._checkOpen()
+ return _DeadlockWrap(self.db.keys)
+
+ def has_key(self, key):
+ self._checkOpen()
+ return _DeadlockWrap(self.db.has_key, key)
+
+ def set_location(self, key):
+ self._checkOpen()
+ self._checkCursor()
+ return _DeadlockWrap(self.dbc.set_range, key)
+
+ def next(self):
+ self._checkOpen()
+ self._checkCursor()
+ rv = _DeadlockWrap(self.dbc.next)
+ return rv
+
+ def previous(self):
+ self._checkOpen()
+ self._checkCursor()
+ rv = _DeadlockWrap(self.dbc.prev)
+ return rv
+
+ def first(self):
+ self._checkOpen()
+ self._checkCursor()
+ rv = _DeadlockWrap(self.dbc.first)
+ return rv
+
+ def last(self):
+ self._checkOpen()
+ self._checkCursor()
+ rv = _DeadlockWrap(self.dbc.last)
+ return rv
+
+ def sync(self):
+ self._checkOpen()
+ return _DeadlockWrap(self.db.sync)
+
+
+#----------------------------------------------------------------------
+# Compatibility object factory functions
+
+def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
+ cachesize=None, lorder=None, hflags=0):
+
+ flags = _checkflag(flag, file)
+ e = _openDBEnv(cachesize)
+ d = db.DB(e)
+ d.set_flags(hflags)
+ if pgsize is not None: d.set_pagesize(pgsize)
+ if lorder is not None: d.set_lorder(lorder)
+ if ffactor is not None: d.set_h_ffactor(ffactor)
+ if nelem is not None: d.set_h_nelem(nelem)
+ d.open(file, db.DB_HASH, flags, mode)
+ return _DBWithCursor(d)
+
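+# Illustrative use of the compatibility interface (the database path is
+# hypothetical):
+#
+#     import bsddb
+#     d = bsddb.hashopen('/tmp/spam.db', 'c')   # create if it doesn't exist
+#     d['key'] = 'value'
+#     d.keys()
+#     d.close()
+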
+#----------------------------------------------------------------------
+
+def btopen(file, flag='c', mode=0666,
+ btflags=0, cachesize=None, maxkeypage=None, minkeypage=None,
+ pgsize=None, lorder=None):
+
+ flags = _checkflag(flag, file)
+ e = _openDBEnv(cachesize)
+ d = db.DB(e)
+ if pgsize is not None: d.set_pagesize(pgsize)
+ if lorder is not None: d.set_lorder(lorder)
+ d.set_flags(btflags)
+ if minkeypage is not None: d.set_bt_minkey(minkeypage)
+ if maxkeypage is not None: d.set_bt_maxkey(maxkeypage)
+ d.open(file, db.DB_BTREE, flags, mode)
+ return _DBWithCursor(d)
+
+#----------------------------------------------------------------------
+
+
+def rnopen(file, flag='c', mode=0666,
+ rnflags=0, cachesize=None, pgsize=None, lorder=None,
+ rlen=None, delim=None, source=None, pad=None):
+
+ flags = _checkflag(flag, file)
+ e = _openDBEnv(cachesize)
+ d = db.DB(e)
+ if pgsize is not None: d.set_pagesize(pgsize)
+ if lorder is not None: d.set_lorder(lorder)
+ d.set_flags(rnflags)
+ if delim is not None: d.set_re_delim(delim)
+ if rlen is not None: d.set_re_len(rlen)
+ if source is not None: d.set_re_source(source)
+ if pad is not None: d.set_re_pad(pad)
+ d.open(file, db.DB_RECNO, flags, mode)
+ return _DBWithCursor(d)
+
+#----------------------------------------------------------------------
+
+def _openDBEnv(cachesize):
+ e = db.DBEnv()
+ if cachesize is not None:
+ if cachesize >= 20480:
+ e.set_cachesize(0, cachesize)
+ else:
+ raise error, "cachesize must be >= 20480"
+ e.set_lk_detect(db.DB_LOCK_DEFAULT)
+ e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
+ return e
+
+def _checkflag(flag, file):
+ if flag == 'r':
+ flags = db.DB_RDONLY
+ elif flag == 'rw':
+ flags = 0
+ elif flag == 'w':
+ flags = db.DB_CREATE
+ elif flag == 'c':
+ flags = db.DB_CREATE
+ elif flag == 'n':
+ flags = db.DB_CREATE
+ #flags = db.DB_CREATE | db.DB_TRUNCATE
+ # we used the db.DB_TRUNCATE flag for this before, but BerkeleyDB
+ # 4.2.52 changed to disallow truncate with txn environments.
+ if file is not None and os.path.isfile(file):
+ os.unlink(file)
+ else:
+ raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
+ return flags | db.DB_THREAD
+
+#----------------------------------------------------------------------
+
+
+# This is a silly little hack that allows apps to continue to use the
+# DB_THREAD flag even on systems without threads without freaking out
+# BerkeleyDB.
+#
+# This assumes that if Python was built with thread support then
+# BerkeleyDB was too.
+
+try:
+ import thread
+ del thread
+ if db.version() < (3, 3, 0):
+ db.DB_THREAD = 0
+except ImportError:
+ db.DB_THREAD = 0
+
+#----------------------------------------------------------------------
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bsddb/db.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,51 @@
+#----------------------------------------------------------------------
+# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
+# and Andrew Kuchling. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# o Redistributions of source code must retain the above copyright
+# notice, this list of conditions, and the disclaimer that follows.
+#
+# o Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions, and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# o Neither the name of Digital Creations nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
+# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
+# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#----------------------------------------------------------------------
+
+
+# This module is just a placeholder for possible future expansion, in
+# case we ever want to augment the stuff in _db in any way. For now
+# it just simply imports everything from _db.
+
+if __name__.startswith('bsddb3.'):
+ # import the _pybsddb binary, as it should be a more recent version
+ # from the standalone pybsddb addon package than the version included
+ # with python as bsddb._bsddb.
+ from _pybsddb import *
+ from _pybsddb import __version__
+else:
+ from _bsddb import *
+ from _bsddb import __version__
+
+if version() < (3, 2, 0):
+ raise ImportError, "correct BerkeleyDB symbols not found. Perhaps python was statically linked with an older version?"
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bsddb/dbobj.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,254 @@
+#-------------------------------------------------------------------------
+# This file contains real Python object wrappers for DB and DBEnv
+# C "objects" that can be usefully subclassed. The previous SWIG
+# based interface allowed this thanks to SWIG's shadow classes.
+# -- Gregory P. Smith
+#-------------------------------------------------------------------------
+#
+# (C) Copyright 2001 Autonomous Zone Industries
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+
+#
+# TODO it would be *really nice* to have an automatic shadow class populator
+# so that new methods don't need to be added here manually after being
+# added to _bsddb.c.
+#
+
+import db
+
+try:
+ from UserDict import DictMixin
+except ImportError:
+ # DictMixin is new in Python 2.3
+ class DictMixin: pass
+
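+# Because these wrappers are real Python classes they can be usefully
+# subclassed; a hypothetical sketch:
+#
+#     class LoggingDB(DB):
+#         def put(self, *args, **kwargs):
+#             print 'put:', args[:1]
+#             return DB.put(self, *args, **kwargs)
+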
+class DBEnv:
+ def __init__(self, *args, **kwargs):
+ self._cobj = apply(db.DBEnv, args, kwargs)
+
+ def close(self, *args, **kwargs):
+ return apply(self._cobj.close, args, kwargs)
+ def open(self, *args, **kwargs):
+ return apply(self._cobj.open, args, kwargs)
+ def remove(self, *args, **kwargs):
+ return apply(self._cobj.remove, args, kwargs)
+ def set_shm_key(self, *args, **kwargs):
+ return apply(self._cobj.set_shm_key, args, kwargs)
+ def set_cachesize(self, *args, **kwargs):
+ return apply(self._cobj.set_cachesize, args, kwargs)
+ def set_data_dir(self, *args, **kwargs):
+ return apply(self._cobj.set_data_dir, args, kwargs)
+ def set_flags(self, *args, **kwargs):
+ return apply(self._cobj.set_flags, args, kwargs)
+ def set_lg_bsize(self, *args, **kwargs):
+ return apply(self._cobj.set_lg_bsize, args, kwargs)
+ def set_lg_dir(self, *args, **kwargs):
+ return apply(self._cobj.set_lg_dir, args, kwargs)
+ def set_lg_max(self, *args, **kwargs):
+ return apply(self._cobj.set_lg_max, args, kwargs)
+ def set_lk_detect(self, *args, **kwargs):
+ return apply(self._cobj.set_lk_detect, args, kwargs)
+ if db.version() < (4,5):
+ def set_lk_max(self, *args, **kwargs):
+ return apply(self._cobj.set_lk_max, args, kwargs)
+ def set_lk_max_locks(self, *args, **kwargs):
+ return apply(self._cobj.set_lk_max_locks, args, kwargs)
+ def set_lk_max_lockers(self, *args, **kwargs):
+ return apply(self._cobj.set_lk_max_lockers, args, kwargs)
+ def set_lk_max_objects(self, *args, **kwargs):
+ return apply(self._cobj.set_lk_max_objects, args, kwargs)
+ def set_mp_mmapsize(self, *args, **kwargs):
+ return apply(self._cobj.set_mp_mmapsize, args, kwargs)
+ def set_timeout(self, *args, **kwargs):
+ return apply(self._cobj.set_timeout, args, kwargs)
+ def set_tmp_dir(self, *args, **kwargs):
+ return apply(self._cobj.set_tmp_dir, args, kwargs)
+ def txn_begin(self, *args, **kwargs):
+ return apply(self._cobj.txn_begin, args, kwargs)
+ def txn_checkpoint(self, *args, **kwargs):
+ return apply(self._cobj.txn_checkpoint, args, kwargs)
+ def txn_stat(self, *args, **kwargs):
+ return apply(self._cobj.txn_stat, args, kwargs)
+ def set_tx_max(self, *args, **kwargs):
+ return apply(self._cobj.set_tx_max, args, kwargs)
+ def set_tx_timestamp(self, *args, **kwargs):
+ return apply(self._cobj.set_tx_timestamp, args, kwargs)
+ def lock_detect(self, *args, **kwargs):
+ return apply(self._cobj.lock_detect, args, kwargs)
+ def lock_get(self, *args, **kwargs):
+ return apply(self._cobj.lock_get, args, kwargs)
+ def lock_id(self, *args, **kwargs):
+ return apply(self._cobj.lock_id, args, kwargs)
+ def lock_put(self, *args, **kwargs):
+ return apply(self._cobj.lock_put, args, kwargs)
+ def lock_stat(self, *args, **kwargs):
+ return apply(self._cobj.lock_stat, args, kwargs)
+ def log_archive(self, *args, **kwargs):
+ return apply(self._cobj.log_archive, args, kwargs)
+
+ def set_get_returns_none(self, *args, **kwargs):
+ return apply(self._cobj.set_get_returns_none, args, kwargs)
+
+ if db.version() >= (4,0):
+ def log_stat(self, *args, **kwargs):
+ return apply(self._cobj.log_stat, args, kwargs)
+
+ if db.version() >= (4,1):
+ def dbremove(self, *args, **kwargs):
+ return apply(self._cobj.dbremove, args, kwargs)
+ def dbrename(self, *args, **kwargs):
+ return apply(self._cobj.dbrename, args, kwargs)
+ def set_encrypt(self, *args, **kwargs):
+ return apply(self._cobj.set_encrypt, args, kwargs)
+
+ if db.version() >= (4,4):
+ def lsn_reset(self, *args, **kwargs):
+ return apply(self._cobj.lsn_reset, args, kwargs)
+
+
+class DB(DictMixin):
+ def __init__(self, dbenv, *args, **kwargs):
+ # give it the proper DBEnv C object that it's expecting
+ self._cobj = apply(db.DB, (dbenv._cobj,) + args, kwargs)
+
+ # TODO are there other dict methods that need to be overridden?
+ def __len__(self):
+ return len(self._cobj)
+ def __getitem__(self, arg):
+ return self._cobj[arg]
+ def __setitem__(self, key, value):
+ self._cobj[key] = value
+ def __delitem__(self, arg):
+ del self._cobj[arg]
+
+ def append(self, *args, **kwargs):
+ return apply(self._cobj.append, args, kwargs)
+ def associate(self, *args, **kwargs):
+ return apply(self._cobj.associate, args, kwargs)
+ def close(self, *args, **kwargs):
+ return apply(self._cobj.close, args, kwargs)
+ def consume(self, *args, **kwargs):
+ return apply(self._cobj.consume, args, kwargs)
+ def consume_wait(self, *args, **kwargs):
+ return apply(self._cobj.consume_wait, args, kwargs)
+ def cursor(self, *args, **kwargs):
+ return apply(self._cobj.cursor, args, kwargs)
+ def delete(self, *args, **kwargs):
+ return apply(self._cobj.delete, args, kwargs)
+ def fd(self, *args, **kwargs):
+ return apply(self._cobj.fd, args, kwargs)
+ def get(self, *args, **kwargs):
+ return apply(self._cobj.get, args, kwargs)
+ def pget(self, *args, **kwargs):
+ return apply(self._cobj.pget, args, kwargs)
+ def get_both(self, *args, **kwargs):
+ return apply(self._cobj.get_both, args, kwargs)
+ def get_byteswapped(self, *args, **kwargs):
+ return apply(self._cobj.get_byteswapped, args, kwargs)
+ def get_size(self, *args, **kwargs):
+ return apply(self._cobj.get_size, args, kwargs)
+ def get_type(self, *args, **kwargs):
+ return apply(self._cobj.get_type, args, kwargs)
+ def join(self, *args, **kwargs):
+ return apply(self._cobj.join, args, kwargs)
+ def key_range(self, *args, **kwargs):
+ return apply(self._cobj.key_range, args, kwargs)
+ def has_key(self, *args, **kwargs):
+ return apply(self._cobj.has_key, args, kwargs)
+ def items(self, *args, **kwargs):
+ return apply(self._cobj.items, args, kwargs)
+ def keys(self, *args, **kwargs):
+ return apply(self._cobj.keys, args, kwargs)
+ def open(self, *args, **kwargs):
+ return apply(self._cobj.open, args, kwargs)
+ def put(self, *args, **kwargs):
+ return apply(self._cobj.put, args, kwargs)
+ def remove(self, *args, **kwargs):
+ return apply(self._cobj.remove, args, kwargs)
+ def rename(self, *args, **kwargs):
+ return apply(self._cobj.rename, args, kwargs)
+ def set_bt_minkey(self, *args, **kwargs):
+ return apply(self._cobj.set_bt_minkey, args, kwargs)
+ def set_bt_compare(self, *args, **kwargs):
+ return apply(self._cobj.set_bt_compare, args, kwargs)
+ def set_cachesize(self, *args, **kwargs):
+ return apply(self._cobj.set_cachesize, args, kwargs)
+ def set_flags(self, *args, **kwargs):
+ return apply(self._cobj.set_flags, args, kwargs)
+ def set_h_ffactor(self, *args, **kwargs):
+ return apply(self._cobj.set_h_ffactor, args, kwargs)
+ def set_h_nelem(self, *args, **kwargs):
+ return apply(self._cobj.set_h_nelem, args, kwargs)
+ def set_lorder(self, *args, **kwargs):
+ return apply(self._cobj.set_lorder, args, kwargs)
+ def set_pagesize(self, *args, **kwargs):
+ return apply(self._cobj.set_pagesize, args, kwargs)
+ def set_re_delim(self, *args, **kwargs):
+ return apply(self._cobj.set_re_delim, args, kwargs)
+ def set_re_len(self, *args, **kwargs):
+ return apply(self._cobj.set_re_len, args, kwargs)
+ def set_re_pad(self, *args, **kwargs):
+ return apply(self._cobj.set_re_pad, args, kwargs)
+ def set_re_source(self, *args, **kwargs):
+ return apply(self._cobj.set_re_source, args, kwargs)
+ def set_q_extentsize(self, *args, **kwargs):
+ return apply(self._cobj.set_q_extentsize, args, kwargs)
+ def stat(self, *args, **kwargs):
+ return apply(self._cobj.stat, args, kwargs)
+ def sync(self, *args, **kwargs):
+ return apply(self._cobj.sync, args, kwargs)
+ def type(self, *args, **kwargs):
+ return apply(self._cobj.type, args, kwargs)
+ def upgrade(self, *args, **kwargs):
+ return apply(self._cobj.upgrade, args, kwargs)
+ def values(self, *args, **kwargs):
+ return apply(self._cobj.values, args, kwargs)
+ def verify(self, *args, **kwargs):
+ return apply(self._cobj.verify, args, kwargs)
+ def set_get_returns_none(self, *args, **kwargs):
+ return apply(self._cobj.set_get_returns_none, args, kwargs)
+
+ if db.version() >= (4,1):
+ def set_encrypt(self, *args, **kwargs):
+ return apply(self._cobj.set_encrypt, args, kwargs)
+
+
+class DBSequence:
+ def __init__(self, *args, **kwargs):
+ self._cobj = apply(db.DBSequence, args, kwargs)
+
+ def close(self, *args, **kwargs):
+ return apply(self._cobj.close, args, kwargs)
+ def get(self, *args, **kwargs):
+ return apply(self._cobj.get, args, kwargs)
+ def get_dbp(self, *args, **kwargs):
+ return apply(self._cobj.get_dbp, args, kwargs)
+ def get_key(self, *args, **kwargs):
+ return apply(self._cobj.get_key, args, kwargs)
+ def init_value(self, *args, **kwargs):
+ return apply(self._cobj.init_value, args, kwargs)
+ def open(self, *args, **kwargs):
+ return apply(self._cobj.open, args, kwargs)
+ def remove(self, *args, **kwargs):
+ return apply(self._cobj.remove, args, kwargs)
+ def stat(self, *args, **kwargs):
+ return apply(self._cobj.stat, args, kwargs)
+ def set_cachesize(self, *args, **kwargs):
+ return apply(self._cobj.set_cachesize, args, kwargs)
+ def set_flags(self, *args, **kwargs):
+ return apply(self._cobj.set_flags, args, kwargs)
+ def set_range(self, *args, **kwargs):
+ return apply(self._cobj.set_range, args, kwargs)
+ def get_cachesize(self, *args, **kwargs):
+ return apply(self._cobj.get_cachesize, args, kwargs)
+ def get_flags(self, *args, **kwargs):
+ return apply(self._cobj.get_flags, args, kwargs)
+ def get_range(self, *args, **kwargs):
+ return apply(self._cobj.get_range, args, kwargs)
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bsddb/dbrecio.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,190 @@
+
+"""
+File-like objects that read from or write to a bsddb record.
+
+This implements (nearly) all stdio methods.
+
+f = DBRecIO(db, key, txn=None)
+f.close() # explicitly release resources held
+flag = f.isatty() # always false
+pos = f.tell() # get current position
+f.seek(pos) # set current position
+f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
+buf = f.read() # read until EOF
+buf = f.read(n) # read up to n bytes
+f.truncate([size]) # truncate file to at most size (default: current pos)
+f.write(buf) # write at current position
+f.writelines(list) # for line in list: f.write(line)
+
+Notes:
+- fileno() is left unimplemented so that code which uses it triggers
+ an exception early.
+- There's a simple test set (see end of this file) - not yet updated
+ for DBRecIO.
+- readline() is not implemented yet.
+
+
+From:
+ Itamar Shtull-Trauring <itamar@maxnm.com>
+"""
+
+import errno
+import string
+
+class DBRecIO:
+ def __init__(self, db, key, txn=None):
+ self.db = db
+ self.key = key
+ self.txn = txn
+ # fetch the record once so self.len is a real number; read()
+ # and seek(pos, 2) depend on it
+ data = db.get(key, txn=txn)
+ self.len = len(data or '')
+ self.pos = 0
+ self.closed = 0
+ self.softspace = 0
+
+ def close(self):
+ if not self.closed:
+ self.closed = 1
+ del self.db, self.txn
+
+ def isatty(self):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ return 0
+
+ def seek(self, pos, mode = 0):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if mode == 1:
+ pos = pos + self.pos
+ elif mode == 2:
+ pos = pos + self.len
+ self.pos = max(0, pos)
+
+ def tell(self):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ return self.pos
+
+ def read(self, n = -1):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if n < 0:
+ newpos = self.len
+ else:
+ newpos = min(self.pos+n, self.len)
+
+ dlen = newpos - self.pos
+
+ r = self.db.get(self.key, txn=self.txn, dlen=dlen, doff=self.pos)
+ self.pos = newpos
+ return r
+
+ __fixme = """
+ def readline(self, length=None):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if self.buflist:
+ self.buf = self.buf + string.joinfields(self.buflist, '')
+ self.buflist = []
+ i = string.find(self.buf, '\n', self.pos)
+ if i < 0:
+ newpos = self.len
+ else:
+ newpos = i+1
+ if length is not None:
+ if self.pos + length < newpos:
+ newpos = self.pos + length
+ r = self.buf[self.pos:newpos]
+ self.pos = newpos
+ return r
+
+ def readlines(self, sizehint = 0):
+ total = 0
+ lines = []
+ line = self.readline()
+ while line:
+ lines.append(line)
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline()
+ return lines
+ """
+
+ def truncate(self, size=None):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if size is None:
+ size = self.pos
+ elif size < 0:
+ raise IOError(errno.EINVAL,
+ "Negative size not allowed")
+ elif size < self.pos:
+ self.pos = size
+ self.db.put(self.key, "", txn=self.txn, dlen=self.len-size, doff=size)
+
+ def write(self, s):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if not s: return
+ if self.pos > self.len:
+ # pad the gap with NUL bytes via a partial put, mirroring
+ # the put below (the record grows past its old end)
+ pad = '\0'*(self.pos - self.len)
+ self.db.put(self.key, pad, txn=self.txn, dlen=len(pad), doff=self.len)
+ self.len = self.pos
+ newpos = self.pos + len(s)
+ self.db.put(self.key, s, txn=self.txn, dlen=len(s), doff=self.pos)
+ self.pos = newpos
+
+ def writelines(self, list):
+ self.write(string.joinfields(list, ''))
+
+ def flush(self):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+
+
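+# A minimal usage sketch (added for illustration; 'd' is assumed to be an
+# already-open bsddb DB object holding a record under 'mykey'):
+def _example_dbrecio(d):
+    f = DBRecIO(d, 'mykey')   # wrap the record in a file-like object
+    head = f.read(16)         # read up to the first 16 bytes
+    f.seek(0, 2)              # mode 2: position relative to EOF
+    f.close()                 # drop the db/txn references
+    return head
+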
+"""
+# A little test suite
+
+def _test():
+ import sys
+ if sys.argv[1:]:
+ file = sys.argv[1]
+ else:
+ file = '/etc/passwd'
+ lines = open(file, 'r').readlines()
+ text = open(file, 'r').read()
+ f = StringIO()
+ for line in lines[:-2]:
+ f.write(line)
+ f.writelines(lines[-2:])
+ if f.getvalue() != text:
+ raise RuntimeError, 'write failed'
+ length = f.tell()
+ print 'File length =', length
+ f.seek(len(lines[0]))
+ f.write(lines[1])
+ f.seek(0)
+ print 'First line =', repr(f.readline())
+ here = f.tell()
+ line = f.readline()
+ print 'Second line =', repr(line)
+ f.seek(-len(line), 1)
+ line2 = f.read(len(line))
+ if line != line2:
+ raise RuntimeError, 'bad result after seek back'
+ f.seek(len(line2), 1)
+ list = f.readlines()
+ line = list[-1]
+ f.seek(f.tell() - len(line))
+ line2 = f.read()
+ if line != line2:
+ raise RuntimeError, 'bad result after seek back from EOF'
+ print 'Read', len(list), 'more lines'
+ print 'File length =', f.tell()
+ if f.tell() != length:
+ raise RuntimeError, 'bad length'
+ f.close()
+
+if __name__ == '__main__':
+ _test()
+"""
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bsddb/dbshelve.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,299 @@
+#!/bin/env python
+#------------------------------------------------------------------------
+# Copyright (c) 1997-2001 by Total Control Software
+# All Rights Reserved
+#------------------------------------------------------------------------
+#
+# Module Name: dbShelve.py
+#
+# Description: A reimplementation of the standard shelve.py that
+# forces the use of cPickle, and DB.
+#
+# Creation Date: 11/3/97 3:39:04PM
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+# 13-Dec-2000: Updated to be used with the new bsddb3 package.
+# Added DBShelfCursor class.
+#
+#------------------------------------------------------------------------
+
+"""Manage shelves of pickled objects using bsddb database files for the
+storage.
+"""
+
+#------------------------------------------------------------------------
+
+import cPickle
+try:
+ from UserDict import DictMixin
+except ImportError:
+ # DictMixin is new in Python 2.3
+ class DictMixin: pass
+import db
+
+#------------------------------------------------------------------------
+
+
+def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
+ dbenv=None, dbname=None):
+ """
+ A simple factory function for compatibility with the standard
+    shelve.py module. It can be used like this, where key is a string
+ and data is a pickleable object:
+
+ from bsddb import dbshelve
+ db = dbshelve.open(filename)
+
+ db[key] = data
+
+ db.close()
+ """
+ if type(flags) == type(''):
+ sflag = flags
+ if sflag == 'r':
+ flags = db.DB_RDONLY
+ elif sflag == 'rw':
+ flags = 0
+ elif sflag == 'w':
+ flags = db.DB_CREATE
+ elif sflag == 'c':
+ flags = db.DB_CREATE
+ elif sflag == 'n':
+ flags = db.DB_TRUNCATE | db.DB_CREATE
+ else:
+ raise db.DBError, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"
+
+ d = DBShelf(dbenv)
+ d.open(filename, dbname, filetype, flags, mode)
+ return d
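+
+# Illustrative sketch (added, not part of the original module): opening a
+# shelf with a string flag and round-tripping a pickled value; the filename
+# 'example.db' is hypothetical.
+def _example_dbshelve():
+    d = open('example.db', 'c')   # 'c' maps to db.DB_CREATE above
+    d['key'] = (1, 2, 3)          # the value is pickled on the way in
+    value = d['key']              # ...and unpickled on the way out
+    d.close()
+    return value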
+
+#---------------------------------------------------------------------------
+
+class DBShelf(DictMixin):
+ """A shelf to hold pickled objects, built upon a bsddb DB object. It
+ automatically pickles/unpickles data objects going to/from the DB.
+ """
+ def __init__(self, dbenv=None):
+ self.db = db.DB(dbenv)
+ self.binary = 1
+
+
+ def __del__(self):
+ self.close()
+
+
+ def __getattr__(self, name):
+ """Many methods we can just pass through to the DB object.
+ (See below)
+ """
+ return getattr(self.db, name)
+
+
+ #-----------------------------------
+ # Dictionary access methods
+
+ def __len__(self):
+ return len(self.db)
+
+
+ def __getitem__(self, key):
+ data = self.db[key]
+ return cPickle.loads(data)
+
+
+ def __setitem__(self, key, value):
+ data = cPickle.dumps(value, self.binary)
+ self.db[key] = data
+
+
+ def __delitem__(self, key):
+ del self.db[key]
+
+
+ def keys(self, txn=None):
+ if txn != None:
+ return self.db.keys(txn)
+ else:
+ return self.db.keys()
+
+
+ def items(self, txn=None):
+ if txn != None:
+ items = self.db.items(txn)
+ else:
+ items = self.db.items()
+ newitems = []
+
+ for k, v in items:
+ newitems.append( (k, cPickle.loads(v)) )
+ return newitems
+
+ def values(self, txn=None):
+ if txn != None:
+ values = self.db.values(txn)
+ else:
+ values = self.db.values()
+
+ return map(cPickle.loads, values)
+
+ #-----------------------------------
+ # Other methods
+
+ def __append(self, value, txn=None):
+ data = cPickle.dumps(value, self.binary)
+ return self.db.append(data, txn)
+
+    def append(self, value, txn=None):
+        # the original tested '!=' here, inverting the check and
+        # contradicting the error message below
+        if self.get_type() == db.DB_RECNO:
+            self.append = self.__append
+            return self.append(value, txn=txn)
+        raise db.DBError, "append() only supported when dbshelve opened with filetype=dbshelve.db.DB_RECNO"
+
+
+ def associate(self, secondaryDB, callback, flags=0):
+ def _shelf_callback(priKey, priData, realCallback=callback):
+ data = cPickle.loads(priData)
+ return realCallback(priKey, data)
+ return self.db.associate(secondaryDB, _shelf_callback, flags)
+
+
+ #def get(self, key, default=None, txn=None, flags=0):
+ def get(self, *args, **kw):
+ # We do it with *args and **kw so if the default value wasn't
+ # given nothing is passed to the extension module. That way
+ # an exception can be raised if set_get_returns_none is turned
+ # off.
+ data = apply(self.db.get, args, kw)
+ try:
+ return cPickle.loads(data)
+ except (TypeError, cPickle.UnpicklingError):
+ return data # we may be getting the default value, or None,
+ # so it doesn't need unpickled.
+
+ def get_both(self, key, value, txn=None, flags=0):
+ data = cPickle.dumps(value, self.binary)
+ data = self.db.get(key, data, txn, flags)
+ return cPickle.loads(data)
+
+
+ def cursor(self, txn=None, flags=0):
+ c = DBShelfCursor(self.db.cursor(txn, flags))
+ c.binary = self.binary
+ return c
+
+
+ def put(self, key, value, txn=None, flags=0):
+ data = cPickle.dumps(value, self.binary)
+ return self.db.put(key, data, txn, flags)
+
+
+ def join(self, cursorList, flags=0):
+ raise NotImplementedError
+
+
+ #----------------------------------------------
+ # Methods allowed to pass-through to self.db
+ #
+ # close, delete, fd, get_byteswapped, get_type, has_key,
+ # key_range, open, remove, rename, stat, sync,
+ # upgrade, verify, and all set_* methods.
+
+
+#---------------------------------------------------------------------------
+
+class DBShelfCursor:
+    """Cursor over a DBShelf. Keys pass through unchanged; data values
+    are unpickled on the way out (see _extract below).
+    """
+ def __init__(self, cursor):
+ self.dbc = cursor
+
+ def __del__(self):
+ self.close()
+
+
+ def __getattr__(self, name):
+ """Some methods we can just pass through to the cursor object. (See below)"""
+ return getattr(self.dbc, name)
+
+
+ #----------------------------------------------
+
+ def dup(self, flags=0):
+ return DBShelfCursor(self.dbc.dup(flags))
+
+
+ def put(self, key, value, flags=0):
+ data = cPickle.dumps(value, self.binary)
+ return self.dbc.put(key, data, flags)
+
+
+ def get(self, *args):
+ count = len(args) # a method overloading hack
+ method = getattr(self, 'get_%d' % count)
+        return apply(method, args)   # the original dropped the return value
+
+ def get_1(self, flags):
+ rec = self.dbc.get(flags)
+ return self._extract(rec)
+
+ def get_2(self, key, flags):
+ rec = self.dbc.get(key, flags)
+ return self._extract(rec)
+
+ def get_3(self, key, value, flags):
+ data = cPickle.dumps(value, self.binary)
+        rec = self.dbc.get(key, data, flags)   # the original omitted 'data'
+ return self._extract(rec)
+
+
+ def current(self, flags=0): return self.get_1(flags|db.DB_CURRENT)
+ def first(self, flags=0): return self.get_1(flags|db.DB_FIRST)
+ def last(self, flags=0): return self.get_1(flags|db.DB_LAST)
+ def next(self, flags=0): return self.get_1(flags|db.DB_NEXT)
+ def prev(self, flags=0): return self.get_1(flags|db.DB_PREV)
+ def consume(self, flags=0): return self.get_1(flags|db.DB_CONSUME)
+ def next_dup(self, flags=0): return self.get_1(flags|db.DB_NEXT_DUP)
+ def next_nodup(self, flags=0): return self.get_1(flags|db.DB_NEXT_NODUP)
+ def prev_nodup(self, flags=0): return self.get_1(flags|db.DB_PREV_NODUP)
+
+
+ def get_both(self, key, value, flags=0):
+ data = cPickle.dumps(value, self.binary)
+        rec = self.dbc.get_both(key, data, flags)   # the original omitted 'data'
+ return self._extract(rec)
+
+
+ def set(self, key, flags=0):
+ rec = self.dbc.set(key, flags)
+ return self._extract(rec)
+
+ def set_range(self, key, flags=0):
+ rec = self.dbc.set_range(key, flags)
+ return self._extract(rec)
+
+ def set_recno(self, recno, flags=0):
+ rec = self.dbc.set_recno(recno, flags)
+ return self._extract(rec)
+
+ set_both = get_both
+
+ def _extract(self, rec):
+ if rec is None:
+ return None
+ else:
+ key, data = rec
+ return key, cPickle.loads(data)
+
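+    # Illustrative sketch (added): walking every (key, value) pair with a
+    # shelf cursor, assuming set_get_returns_none is enabled (the default),
+    # so an exhausted cursor yields None rather than raising:
+    #
+    #   c = shelf.cursor()
+    #   rec = c.first()
+    #   while rec is not None:
+    #       key, value = rec
+    #       rec = c.next()
+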
+ #----------------------------------------------
+ # Methods allowed to pass-through to self.dbc
+ #
+ # close, count, delete, get_recno, join_item
+
+
+#---------------------------------------------------------------------------
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bsddb/dbtables.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,706 @@
+#-----------------------------------------------------------------------
+#
+# Copyright (C) 2000, 2001 by Autonomous Zone Industries
+# Copyright (C) 2002 Gregory P. Smith
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+# -- Gregory P. Smith <greg@electricrain.com>
+
+# This provides a simple database table interface built on top of
+# the Python BerkeleyDB 3 interface.
+#
+_cvsid = '$Id: dbtables.py 46858 2006-06-11 08:35:14Z neal.norwitz $'
+
+import re
+import sys
+import copy
+import xdrlib
+import random
+from types import ListType, StringType
+import cPickle as pickle
+
+try:
+ # For Pythons w/distutils pybsddb
+ from bsddb3.db import *
+except ImportError:
+ # For Python 2.3
+ from bsddb.db import *
+
+# XXX(nnorwitz): is this correct? DBIncompleteError is conditional in _bsddb.c
+try:
+ DBIncompleteError
+except NameError:
+ class DBIncompleteError(Exception):
+ pass
+
+class TableDBError(StandardError):
+ pass
+class TableAlreadyExists(TableDBError):
+ pass
+
+
+class Cond:
+ """This condition matches everything"""
+ def __call__(self, s):
+ return 1
+
+class ExactCond(Cond):
+ """Acts as an exact match condition function"""
+ def __init__(self, strtomatch):
+ self.strtomatch = strtomatch
+ def __call__(self, s):
+ return s == self.strtomatch
+
+class PrefixCond(Cond):
+ """Acts as a condition function for matching a string prefix"""
+ def __init__(self, prefix):
+ self.prefix = prefix
+ def __call__(self, s):
+ return s[:len(self.prefix)] == self.prefix
+
+class PostfixCond(Cond):
+ """Acts as a condition function for matching a string postfix"""
+ def __init__(self, postfix):
+ self.postfix = postfix
+ def __call__(self, s):
+ return s[-len(self.postfix):] == self.postfix
+
+class LikeCond(Cond):
+ """
+    Acts as a condition function that matches using an SQL 'LIKE' style
+    string: matching is case-insensitive, and '%' signs act as wildcards.
+    This isn't perfect, but it should work for the simple common cases.
+ """
+ def __init__(self, likestr, re_flags=re.IGNORECASE):
+ # escape python re characters
+ chars_to_escape = '.*+()[]?'
+ for char in chars_to_escape :
+ likestr = likestr.replace(char, '\\'+char)
+ # convert %s to wildcards
+ self.likestr = likestr.replace('%', '.*')
+ self.re = re.compile('^'+self.likestr+'$', re_flags)
+ def __call__(self, s):
+ return self.re.match(s)
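+
+# Illustrative sketch (added): each condition class above is a callable
+# predicate over column-value strings, usable on its own:
+def _example_conditions():
+    like = LikeCond('ham%')            # '%' becomes the '.*' wildcard
+    assert like('hamster')
+    assert not like('spam')
+    assert PrefixCond('ab')('abc')
+    assert PostfixCond('bc')('abc')
+    assert ExactCond('abc')('abc')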
+
+#
+# keys used to store database metadata
+#
+_table_names_key = '__TABLE_NAMES__' # list of the tables in this db
+_columns = '._COLUMNS__' # table_name+this key contains a list of columns
+
+def _columns_key(table):
+ return table + _columns
+
+#
+# these keys are found within table sub databases
+#
+_data = '._DATA_.' # this+column+this+rowid key contains table data
+_rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each
+ # row in the table. (no data is stored)
+_rowid_str_len = 8 # length in bytes of the unique rowid strings
+
+def _data_key(table, col, rowid):
+ return table + _data + col + _data + rowid
+
+def _search_col_data_key(table, col):
+ return table + _data + col + _data
+
+def _search_all_data_key(table):
+ return table + _data
+
+def _rowid_key(table, rowid):
+ return table + _rowid + rowid + _rowid
+
+def _search_rowid_key(table):
+ return table + _rowid
+
+def contains_metastrings(s) :
+ """Verify that the given string does not contain any
+ metadata strings that might interfere with dbtables database operation.
+ """
+ if (s.find(_table_names_key) >= 0 or
+ s.find(_columns) >= 0 or
+ s.find(_data) >= 0 or
+ s.find(_rowid) >= 0):
+        return 1
+ else:
+ return 0
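+
+# Illustrative examples (added) of the composed keys above, for a
+# hypothetical table 'people', column 'name' and 8-byte rowid 'AAAAAAAA':
+#
+#   _columns_key('people')                  -> 'people._COLUMNS__'
+#   _data_key('people', 'name', 'AAAAAAAA') -> 'people._DATA_.name._DATA_.AAAAAAAA'
+#   _rowid_key('people', 'AAAAAAAA')        -> 'people._ROWID_.AAAAAAAA._ROWID_.'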
+
+
+class bsdTableDB :
+ def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
+ recover=0, dbflags=0):
+ """bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
+
+ Open database name in the dbhome BerkeleyDB directory.
+ Use keyword arguments when calling this constructor.
+ """
+ self.db = None
+ myflags = DB_THREAD
+ if create:
+ myflags |= DB_CREATE
+ flagsforenv = (DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_TXN | dbflags)
+ # DB_AUTO_COMMIT isn't a valid flag for env.open()
+        try:
+            dbflags |= DB_AUTO_COMMIT
+        except NameError:
+            # DB_AUTO_COMMIT is absent from older BerkeleyDB bindings; a
+            # missing bare name raises NameError (the original caught
+            # AttributeError, which can never trigger here)
+            pass
+ if recover:
+ flagsforenv = flagsforenv | DB_RECOVER
+ self.env = DBEnv()
+ # enable auto deadlock avoidance
+ self.env.set_lk_detect(DB_LOCK_DEFAULT)
+ self.env.open(dbhome, myflags | flagsforenv)
+ if truncate:
+ myflags |= DB_TRUNCATE
+ self.db = DB(self.env)
+ # this code relies on DBCursor.set* methods to raise exceptions
+ # rather than returning None
+ self.db.set_get_returns_none(1)
+ # allow duplicate entries [warning: be careful w/ metadata]
+ self.db.set_flags(DB_DUP)
+ self.db.open(filename, DB_BTREE, dbflags | myflags, mode)
+ self.dbfilename = filename
+ # Initialize the table names list if this is a new database
+ txn = self.env.txn_begin()
+ try:
+ if not self.db.has_key(_table_names_key, txn):
+ self.db.put(_table_names_key, pickle.dumps([], 1), txn=txn)
+ # Yes, bare except
+ except:
+ txn.abort()
+ raise
+ else:
+ txn.commit()
+ # TODO verify more of the database's metadata?
+ self.__tablecolumns = {}
+
+ def __del__(self):
+ self.close()
+
+ def close(self):
+ if self.db is not None:
+ self.db.close()
+ self.db = None
+ if self.env is not None:
+ self.env.close()
+ self.env = None
+
+ def checkpoint(self, mins=0):
+ try:
+ self.env.txn_checkpoint(mins)
+ except DBIncompleteError:
+ pass
+
+ def sync(self):
+ try:
+ self.db.sync()
+ except DBIncompleteError:
+ pass
+
+ def _db_print(self) :
+ """Print the database to stdout for debugging"""
+ print "******** Printing raw database for debugging ********"
+ cur = self.db.cursor()
+ try:
+ key, data = cur.first()
+ while 1:
+ print repr({key: data})
+ next = cur.next()
+ if next:
+ key, data = next
+ else:
+ cur.close()
+ return
+ except DBNotFoundError:
+ cur.close()
+
+
+ def CreateTable(self, table, columns):
+ """CreateTable(table, columns) - Create a new table in the database.
+
+ raises TableDBError if it already exists or for other DB errors.
+ """
+ assert isinstance(columns, ListType)
+ txn = None
+ try:
+ # checking sanity of the table and column names here on
+ # table creation will prevent problems elsewhere.
+ if contains_metastrings(table):
+ raise ValueError(
+ "bad table name: contains reserved metastrings")
+ for column in columns :
+ if contains_metastrings(column):
+ raise ValueError(
+ "bad column name: contains reserved metastrings")
+
+ columnlist_key = _columns_key(table)
+ if self.db.has_key(columnlist_key):
+ raise TableAlreadyExists, "table already exists"
+
+ txn = self.env.txn_begin()
+ # store the table's column info
+ self.db.put(columnlist_key, pickle.dumps(columns, 1), txn=txn)
+
+ # add the table name to the tablelist
+ tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn,
+ flags=DB_RMW))
+ tablelist.append(table)
+ # delete 1st, in case we opened with DB_DUP
+ self.db.delete(_table_names_key, txn)
+ self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
+
+ txn.commit()
+ txn = None
+ except DBError, dberror:
+ if txn:
+ txn.abort()
+ raise TableDBError, dberror[1]
+
+
+ def ListTableColumns(self, table):
+ """Return a list of columns in the given table.
+ [] if the table doesn't exist.
+ """
+ assert isinstance(table, StringType)
+ if contains_metastrings(table):
+ raise ValueError, "bad table name: contains reserved metastrings"
+
+ columnlist_key = _columns_key(table)
+ if not self.db.has_key(columnlist_key):
+ return []
+ pickledcolumnlist = self.db.get(columnlist_key)
+ if pickledcolumnlist:
+ return pickle.loads(pickledcolumnlist)
+ else:
+ return []
+
+ def ListTables(self):
+ """Return a list of tables in this database."""
+ pickledtablelist = self.db.get(_table_names_key)
+ if pickledtablelist:
+ return pickle.loads(pickledtablelist)
+ else:
+ return []
+
+ def CreateOrExtendTable(self, table, columns):
+ """CreateOrExtendTable(table, columns)
+
+ Create a new table in the database.
+
+ If a table of this name already exists, extend it to have any
+ additional columns present in the given list as well as
+ all of its current columns.
+ """
+ assert isinstance(columns, ListType)
+ try:
+ self.CreateTable(table, columns)
+ except TableAlreadyExists:
+ # the table already existed, add any new columns
+ txn = None
+ try:
+ columnlist_key = _columns_key(table)
+ txn = self.env.txn_begin()
+
+ # load the current column list
+ oldcolumnlist = pickle.loads(
+ self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
+ # create a hash table for fast lookups of column names in the
+ # loop below
+ oldcolumnhash = {}
+ for c in oldcolumnlist:
+ oldcolumnhash[c] = c
+
+ # create a new column list containing both the old and new
+ # column names
+ newcolumnlist = copy.copy(oldcolumnlist)
+ for c in columns:
+ if not oldcolumnhash.has_key(c):
+ newcolumnlist.append(c)
+
+ # store the table's new extended column list
+ if newcolumnlist != oldcolumnlist :
+ # delete the old one first since we opened with DB_DUP
+ self.db.delete(columnlist_key, txn)
+ self.db.put(columnlist_key,
+ pickle.dumps(newcolumnlist, 1),
+ txn=txn)
+
+ txn.commit()
+ txn = None
+
+ self.__load_column_info(table)
+ except DBError, dberror:
+ if txn:
+ txn.abort()
+ raise TableDBError, dberror[1]
+
+
+ def __load_column_info(self, table) :
+ """initialize the self.__tablecolumns dict"""
+ # check the column names
+ try:
+ tcolpickles = self.db.get(_columns_key(table))
+ except DBNotFoundError:
+ raise TableDBError, "unknown table: %r" % (table,)
+ if not tcolpickles:
+ raise TableDBError, "unknown table: %r" % (table,)
+ self.__tablecolumns[table] = pickle.loads(tcolpickles)
+
+ def __new_rowid(self, table, txn) :
+ """Create a new unique row identifier"""
+ unique = 0
+ while not unique:
+ # Generate a random 64-bit row ID string
+ # (note: this code has <64 bits of randomness
+ # but it's plenty for our database id needs!)
+ p = xdrlib.Packer()
+ p.pack_int(int(random.random()*2147483647))
+ p.pack_int(int(random.random()*2147483647))
+ newid = p.get_buffer()
+
+ # Guarantee uniqueness by adding this key to the database
+ try:
+ self.db.put(_rowid_key(table, newid), None, txn=txn,
+ flags=DB_NOOVERWRITE)
+ except DBKeyExistError:
+ pass
+ else:
+ unique = 1
+
+ return newid
+
+
+ def Insert(self, table, rowdict) :
+ """Insert(table, datadict) - Insert a new row into the table
+ using the keys+values from rowdict as the column values.
+ """
+ txn = None
+ try:
+ if not self.db.has_key(_columns_key(table)):
+ raise TableDBError, "unknown table"
+
+ # check the validity of each column name
+ if not self.__tablecolumns.has_key(table):
+ self.__load_column_info(table)
+ for column in rowdict.keys() :
+ if not self.__tablecolumns[table].count(column):
+ raise TableDBError, "unknown column: %r" % (column,)
+
+ # get a unique row identifier for this row
+ txn = self.env.txn_begin()
+ rowid = self.__new_rowid(table, txn=txn)
+
+ # insert the row values into the table database
+ for column, dataitem in rowdict.items():
+ # store the value
+ self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
+
+ txn.commit()
+ txn = None
+
+ except DBError, dberror:
+ # WIBNI we could just abort the txn and re-raise the exception?
+ # But no, because TableDBError is not related to DBError via
+ # inheritance, so it would be backwards incompatible. Do the next
+ # best thing.
+ info = sys.exc_info()
+ if txn:
+ txn.abort()
+ self.db.delete(_rowid_key(table, rowid))
+ raise TableDBError, dberror[1], info[2]
+
+
+ def Modify(self, table, conditions={}, mappings={}):
+ """Modify(table, conditions={}, mappings={}) - Modify items in rows matching 'conditions' using mapping functions in 'mappings'
+
+ * table - the table name
+ * conditions - a dictionary keyed on column names containing
+ a condition callable expecting the data string as an
+ argument and returning a boolean.
+ * mappings - a dictionary keyed on column names containing a
+ condition callable expecting the data string as an argument and
+ returning the new string for that column.
+ """
+ try:
+ matching_rowids = self.__Select(table, [], conditions)
+
+ # modify only requested columns
+ columns = mappings.keys()
+ for rowid in matching_rowids.keys():
+ txn = None
+ try:
+ for column in columns:
+ txn = self.env.txn_begin()
+ # modify the requested column
+ try:
+ dataitem = self.db.get(
+ _data_key(table, column, rowid),
+ txn)
+ self.db.delete(
+ _data_key(table, column, rowid),
+ txn)
+ except DBNotFoundError:
+ # XXXXXXX row key somehow didn't exist, assume no
+ # error
+ dataitem = None
+ dataitem = mappings[column](dataitem)
+                        if dataitem is not None:
+ self.db.put(
+ _data_key(table, column, rowid),
+ dataitem, txn=txn)
+ txn.commit()
+ txn = None
+
+ # catch all exceptions here since we call unknown callables
+ except:
+ if txn:
+ txn.abort()
+ raise
+
+ except DBError, dberror:
+ raise TableDBError, dberror[1]
+
+ def Delete(self, table, conditions={}):
+ """Delete(table, conditions) - Delete items matching the given
+ conditions from the table.
+
+ * conditions - a dictionary keyed on column names containing
+ condition functions expecting the data string as an
+ argument and returning a boolean.
+ """
+ try:
+ matching_rowids = self.__Select(table, [], conditions)
+
+ # delete row data from all columns
+ columns = self.__tablecolumns[table]
+ for rowid in matching_rowids.keys():
+ txn = None
+ try:
+ txn = self.env.txn_begin()
+ for column in columns:
+ # delete the data key
+ try:
+ self.db.delete(_data_key(table, column, rowid),
+ txn)
+ except DBNotFoundError:
+ # XXXXXXX column may not exist, assume no error
+ pass
+
+ try:
+ self.db.delete(_rowid_key(table, rowid), txn)
+ except DBNotFoundError:
+ # XXXXXXX row key somehow didn't exist, assume no error
+ pass
+ txn.commit()
+ txn = None
+ except DBError, dberror:
+ if txn:
+ txn.abort()
+ raise
+ except DBError, dberror:
+ raise TableDBError, dberror[1]
+
+
+ def Select(self, table, columns, conditions={}):
+ """Select(table, columns, conditions) - retrieve specific row data
+ Returns a list of row column->value mapping dictionaries.
+
+ * columns - a list of which column data to return. If
+ columns is None, all columns will be returned.
+ * conditions - a dictionary keyed on column names
+ containing callable conditions expecting the data string as an
+ argument and returning a boolean.
+ """
+ try:
+ if not self.__tablecolumns.has_key(table):
+ self.__load_column_info(table)
+ if columns is None:
+ columns = self.__tablecolumns[table]
+ matching_rowids = self.__Select(table, columns, conditions)
+ except DBError, dberror:
+ raise TableDBError, dberror[1]
+ # return the matches as a list of dictionaries
+ return matching_rowids.values()
+
+
+ def __Select(self, table, columns, conditions):
+ """__Select() - Used to implement Select and Delete (above)
+ Returns a dictionary keyed on rowids containing dicts
+ holding the row data for columns listed in the columns param
+ that match the given conditions.
+ * conditions is a dictionary keyed on column names
+ containing callable conditions expecting the data string as an
+ argument and returning a boolean.
+ """
+ # check the validity of each column name
+ if not self.__tablecolumns.has_key(table):
+ self.__load_column_info(table)
+ if columns is None:
+            columns = self.__tablecolumns[table]  # the original 'self.tablecolumns' would raise AttributeError
+ for column in (columns + conditions.keys()):
+ if not self.__tablecolumns[table].count(column):
+ raise TableDBError, "unknown column: %r" % (column,)
+
+        # keyed on rows that match so far, containing dicts keyed on
+ # column names containing the data for that row and column.
+ matching_rowids = {}
+ # keys are rowids that do not match
+ rejected_rowids = {}
+
+ # attempt to sort the conditions in such a way as to minimize full
+ # column lookups
+ def cmp_conditions(atuple, btuple):
+ a = atuple[1]
+ b = btuple[1]
+ if type(a) is type(b):
+ if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
+ # longest prefix first
+ return cmp(len(b.prefix), len(a.prefix))
+ if isinstance(a, LikeCond) and isinstance(b, LikeCond):
+ # longest likestr first
+ return cmp(len(b.likestr), len(a.likestr))
+ return 0
+ if isinstance(a, ExactCond):
+ return -1
+ if isinstance(b, ExactCond):
+ return 1
+ if isinstance(a, PrefixCond):
+ return -1
+ if isinstance(b, PrefixCond):
+ return 1
+ # leave all unknown condition callables alone as equals
+ return 0
+
+ conditionlist = conditions.items()
+ conditionlist.sort(cmp_conditions)
+
+ # Apply conditions to column data to find what we want
+ cur = self.db.cursor()
+ column_num = -1
+ for column, condition in conditionlist:
+ column_num = column_num + 1
+ searchkey = _search_col_data_key(table, column)
+ # speedup: don't linear search columns within loop
+ if column in columns:
+ savethiscolumndata = 1 # save the data for return
+ else:
+ savethiscolumndata = 0 # data only used for selection
+
+ try:
+ key, data = cur.set_range(searchkey)
+ while key[:len(searchkey)] == searchkey:
+ # extract the rowid from the key
+ rowid = key[-_rowid_str_len:]
+
+ if not rejected_rowids.has_key(rowid):
+ # if no condition was specified or the condition
+ # succeeds, add row to our match list.
+ if not condition or condition(data):
+ if not matching_rowids.has_key(rowid):
+ matching_rowids[rowid] = {}
+ if savethiscolumndata:
+ matching_rowids[rowid][column] = data
+ else:
+ if matching_rowids.has_key(rowid):
+ del matching_rowids[rowid]
+ rejected_rowids[rowid] = rowid
+
+ key, data = cur.next()
+
+ except DBError, dberror:
+ if dberror[0] != DB_NOTFOUND:
+ raise
+ continue
+
+ cur.close()
+
+ # we're done selecting rows, garbage collect the reject list
+ del rejected_rowids
+
+ # extract any remaining desired column data from the
+ # database for the matching rows.
+ if len(columns) > 0:
+ for rowid, rowdata in matching_rowids.items():
+ for column in columns:
+ if rowdata.has_key(column):
+ continue
+ try:
+ rowdata[column] = self.db.get(
+ _data_key(table, column, rowid))
+ except DBError, dberror:
+ if dberror[0] != DB_NOTFOUND:
+ raise
+ rowdata[column] = None
+
+ # return the matches
+ return matching_rowids
+
+
+ def Drop(self, table):
+ """Remove an entire table from the database"""
+ txn = None
+ try:
+ txn = self.env.txn_begin()
+
+ # delete the column list
+ self.db.delete(_columns_key(table), txn)
+
+ cur = self.db.cursor(txn)
+
+ # delete all keys containing this tables column and row info
+ table_key = _search_all_data_key(table)
+ while 1:
+ try:
+ key, data = cur.set_range(table_key)
+ except DBNotFoundError:
+ break
+ # only delete items in this table
+ if key[:len(table_key)] != table_key:
+ break
+ cur.delete()
+
+ # delete all rowids used by this table
+ table_key = _search_rowid_key(table)
+ while 1:
+ try:
+ key, data = cur.set_range(table_key)
+ except DBNotFoundError:
+ break
+ # only delete items in this table
+ if key[:len(table_key)] != table_key:
+ break
+ cur.delete()
+
+ cur.close()
+
+ # delete the tablename from the table name list
+ tablelist = pickle.loads(
+ self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
+ try:
+ tablelist.remove(table)
+ except ValueError:
+ # hmm, it wasn't there, oh well, that's what we want.
+ pass
+            # delete 1st, in case we opened with DB_DUP
+ self.db.delete(_table_names_key, txn)
+ self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
+
+ txn.commit()
+ txn = None
+
+ if self.__tablecolumns.has_key(table):
+ del self.__tablecolumns[table]
+
+ except DBError, dberror:
+ if txn:
+ txn.abort()
+ raise TableDBError, dberror[1]
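+
+
+# Illustrative end-to-end sketch (added, not part of the original module):
+# create a table, insert a row and select it back. The filename 'example.db'
+# and the dbhome directory are hypothetical.
+def _example_dbtables(dbhome):
+    tdb = bsdTableDB('example.db', dbhome, create=1)
+    tdb.CreateTable('people', ['name', 'email'])
+    tdb.Insert('people', {'name': 'alice', 'email': 'alice@example.com'})
+    rows = tdb.Select('people', ['email'],
+                      conditions={'name': ExactCond('alice')})
+    tdb.close()
+    return rows   # -> [{'email': 'alice@example.com'}]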
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/bsddb/dbutils.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,77 @@
+#------------------------------------------------------------------------
+#
+# Copyright (C) 2000 Autonomous Zone Industries
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+# Author: Gregory P. Smith <greg@electricrain.com>
+#
+# Note: I don't know how useful this is in reality since when a
+# DBLockDeadlockError happens the current transaction is supposed to be
+# aborted. If it doesn't then when the operation is attempted again
+# the deadlock is still happening...
+# --Robin
+#
+#------------------------------------------------------------------------
+
+
+#
+# import the time.sleep function in a namespace safe way to allow
+# "from bsddb.dbutils import *"
+#
+from time import sleep as _sleep
+
+import db
+
+# always sleep at least N seconds between retries
+_deadlock_MinSleepTime = 1.0/128
+# never sleep more than N seconds between retries
+_deadlock_MaxSleepTime = 3.14159
+
+# Assign a file object to this for a "sleeping" message to be written to it
+# each retry
+_deadlock_VerboseFile = None
+
+
+def DeadlockWrap(function, *_args, **_kwargs):
+ """DeadlockWrap(function, *_args, **_kwargs) - automatically retries
+ function in case of a database deadlock.
+
+    This function is intended to wrap database calls so that they are
+    retried, with exponentially backing-off sleeps in between, whenever
+    a DBLockDeadlockError exception is raised.
+
+ A 'max_retries' parameter may optionally be passed to prevent it
+ from retrying forever (in which case the exception will be reraised).
+
+ d = DB(...)
+ d.open(...)
+ DeadlockWrap(d.put, "foo", data="bar") # set key "foo" to "bar"
+ """
+ sleeptime = _deadlock_MinSleepTime
+ max_retries = _kwargs.get('max_retries', -1)
+ if _kwargs.has_key('max_retries'):
+ del _kwargs['max_retries']
+ while True:
+ try:
+ return function(*_args, **_kwargs)
+ except db.DBLockDeadlockError:
+ if _deadlock_VerboseFile:
+ _deadlock_VerboseFile.write(
+ 'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
+ _sleep(sleeptime)
+ # exponential backoff in the sleep time
+ sleeptime *= 2
+ if sleeptime > _deadlock_MaxSleepTime:
+ sleeptime = _deadlock_MaxSleepTime
+ max_retries -= 1
+ if max_retries == -1:
+ raise
+
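+# Illustrative sketch (added): bounding the retries. After the first
+# attempt, the call is retried up to max_retries times before the deadlock
+# exception propagates; 'd' is assumed to be an open DB object.
+def _example_bounded_put(d):
+    return DeadlockWrap(d.put, "foo", data="bar", max_retries=5)
+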
+
+#------------------------------------------------------------------------
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/cProfile.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,190 @@
+#! /usr/bin/env python
+
+"""Python interface for the 'lsprof' profiler.
+ Compatible with the 'profile' module.
+"""
+
+__all__ = ["run", "runctx", "help", "Profile"]
+
+import _lsprof
+
+# ____________________________________________________________
+# Simple interface
+
+def run(statement, filename=None, sort=-1):
+ """Run statement under profiler optionally saving results in filename
+
+ This function takes a single argument that can be passed to the
+ "exec" statement, and an optional file name. In all cases this
+ routine attempts to "exec" its first argument and gather profiling
+ statistics from the execution. If no file name is present, then this
+ function automatically prints a simple profiling report, sorted by the
+ standard name string (file/line/function-name) that is presented in
+ each line.
+ """
+ prof = Profile()
+ result = None
+ try:
+ try:
+ prof = prof.run(statement)
+ except SystemExit:
+ pass
+ finally:
+ if filename is not None:
+ prof.dump_stats(filename)
+ else:
+ result = prof.print_stats(sort)
+ return result
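+
+# Illustrative usage (added): profile a statement and print the report
+# sorted by cumulative time (any pstats.Stats sort key works here), e.g.
+#
+#   import cProfile
+#   cProfile.run('sum(x*x for x in range(10000))', sort='cumulative')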
+
+def runctx(statement, globals, locals, filename=None):
+ """Run statement under profiler, supplying your own globals and locals,
+ optionally saving results in filename.
+
+ statement and filename have the same semantics as profile.run
+ """
+ prof = Profile()
+ result = None
+ try:
+ try:
+ prof = prof.runctx(statement, globals, locals)
+ except SystemExit:
+ pass
+ finally:
+ if filename is not None:
+ prof.dump_stats(filename)
+ else:
+ result = prof.print_stats()
+ return result
+
+# Backwards compatibility.
+def help():
+ print "Documentation for the profile/cProfile modules can be found "
+ print "in the Python Library Reference, section 'The Python Profiler'."
+
+# ____________________________________________________________
+
+class Profile(_lsprof.Profiler):
+ """Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)
+
+ Builds a profiler object using the specified timer function.
+ The default timer is a fast built-in one based on real time.
+ For custom timer functions returning integers, time_unit can
+ be a float specifying a scale (i.e. how long each integer unit
+ is, in seconds).
+ """
+
+ # Most of the functionality is in the base class.
+ # This subclass only adds convenient and backward-compatible methods.
+
+ def print_stats(self, sort=-1):
+ import pstats
+ pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()
+
+ def dump_stats(self, file):
+ import marshal
+ f = open(file, 'wb')
+ self.create_stats()
+ marshal.dump(self.stats, f)
+ f.close()
+
+ def create_stats(self):
+ self.disable()
+ self.snapshot_stats()
+
+ def snapshot_stats(self):
+ entries = self.getstats()
+ self.stats = {}
+ callersdicts = {}
+ # call information
+ for entry in entries:
+ func = label(entry.code)
+ nc = entry.callcount # ncalls column of pstats (before '/')
+ cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
+ tt = entry.inlinetime # tottime column of pstats
+ ct = entry.totaltime # cumtime column of pstats
+ callers = {}
+ callersdicts[id(entry.code)] = callers
+ self.stats[func] = cc, nc, tt, ct, callers
+ # subcall information
+ for entry in entries:
+ if entry.calls:
+ func = label(entry.code)
+ for subentry in entry.calls:
+ try:
+ callers = callersdicts[id(subentry.code)]
+ except KeyError:
+ continue
+ nc = subentry.callcount
+ cc = nc - subentry.reccallcount
+ tt = subentry.inlinetime
+ ct = subentry.totaltime
+ if func in callers:
+ prev = callers[func]
+ nc += prev[0]
+ cc += prev[1]
+ tt += prev[2]
+ ct += prev[3]
+ callers[func] = nc, cc, tt, ct
+
+ # The following two methods can be called by clients to use
+ # a profiler to profile a statement, given as a string.
+
+ def run(self, cmd):
+ import __main__
+ dict = __main__.__dict__
+ return self.runctx(cmd, dict, dict)
+
+ def runctx(self, cmd, globals, locals):
+ self.enable()
+ try:
+ exec cmd in globals, locals
+ finally:
+ self.disable()
+ return self
+
+ # This method is more useful to profile a single function call.
+ def runcall(self, func, *args, **kw):
+ self.enable()
+ try:
+ return func(*args, **kw)
+ finally:
+ self.disable()
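+
+    # Illustrative usage (added): profile a single call and print the
+    # accumulated statistics, e.g.
+    #
+    #   p = Profile()
+    #   result = p.runcall(sorted, range(10), reverse=True)
+    #   p.print_stats()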
+
+# ____________________________________________________________
+
+def label(code):
+ if isinstance(code, str):
+ return ('~', 0, code) # built-in functions ('~' sorts at the end)
+ else:
+ return (code.co_filename, code.co_firstlineno, code.co_name)
+
+# ____________________________________________________________
+
+def main():
+ import os, sys
+ from optparse import OptionParser
+ usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
+ parser = OptionParser(usage=usage)
+ parser.allow_interspersed_args = False
+ parser.add_option('-o', '--outfile', dest="outfile",
+ help="Save stats to <outfile>", default=None)
+ parser.add_option('-s', '--sort', dest="sort",
+ help="Sort order when printing to stdout, based on pstats.Stats class", default=-1)
+
+ if not sys.argv[1:]:
+ parser.print_usage()
+ sys.exit(2)
+
+ (options, args) = parser.parse_args()
+ sys.argv[:] = args
+
+ if (len(sys.argv) > 0):
+ sys.path.insert(0, os.path.dirname(sys.argv[0]))
+ run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)
+ else:
+ parser.print_usage()
+ return parser
+
+# When invoked as main program, invoke the profiler on a script
+if __name__ == '__main__':
+ main()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/calendar.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,701 @@
+"""Calendar printing functions
+
+Note when comparing these calendars to the ones printed by cal(1): By
+default, these calendars have Monday as the first day of the week, and
+Sunday as the last (the European convention). Use setfirstweekday() to
+set the first day of the week (0=Monday, 6=Sunday)."""
+
+from __future__ import with_statement
+import sys, datetime, locale
+
+__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
+ "firstweekday", "isleap", "leapdays", "weekday", "monthrange",
+ "monthcalendar", "prmonth", "month", "prcal", "calendar",
+ "timegm", "month_name", "month_abbr", "day_name", "day_abbr"]
+
+# Exception raised for bad input (with string parameter for details)
+error = ValueError
+
+# Exceptions raised for bad input
+class IllegalMonthError(ValueError):
+ def __init__(self, month):
+ self.month = month
+ def __str__(self):
+ return "bad month number %r; must be 1-12" % self.month
+
+
+class IllegalWeekdayError(ValueError):
+ def __init__(self, weekday):
+ self.weekday = weekday
+ def __str__(self):
+ return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
+
+
+# Constants for months referenced later
+January = 1
+February = 2
+
+# Number of days per month (except for February in leap years)
+mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
+
+# This module used to have hard-coded lists of day and month names, as
+# English strings. The classes following emulate a read-only version of
+# that, but supply localized names. Note that the values are computed
+# fresh on each call, in case the user changes locale between calls.
+
+class _localized_month:
+
+ _months = [datetime.date(2001, i+1, 1).strftime for i in xrange(12)]
+ _months.insert(0, lambda x: "")
+
+ def __init__(self, format):
+ self.format = format
+
+ def __getitem__(self, i):
+ funcs = self._months[i]
+ if isinstance(i, slice):
+ return [f(self.format) for f in funcs]
+ else:
+ return funcs(self.format)
+
+ def __len__(self):
+ return 13
+
+
+class _localized_day:
+
+ # January 1, 2001, was a Monday.
+ _days = [datetime.date(2001, 1, i+1).strftime for i in xrange(7)]
+
+ def __init__(self, format):
+ self.format = format
+
+ def __getitem__(self, i):
+ funcs = self._days[i]
+ if isinstance(i, slice):
+ return [f(self.format) for f in funcs]
+ else:
+ return funcs(self.format)
+
+ def __len__(self):
+ return 7
+
+
+# Full and abbreviated names of weekdays
+day_name = _localized_day('%A')
+day_abbr = _localized_day('%a')
+
+# Full and abbreviated names of months (1-based arrays!!!)
+month_name = _localized_month('%B')
+month_abbr = _localized_month('%b')
+
+# Constants for weekdays
+(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
+
+
+def isleap(year):
+ """Return 1 for leap years, 0 for non-leap years."""
+ return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
+
+
+def leapdays(y1, y2):
+ """Return number of leap years in range [y1, y2).
+ Assume y1 <= y2."""
+ y1 -= 1
+ y2 -= 1
+ return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
+
+
+def weekday(year, month, day):
+ """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
+ day (1-31)."""
+ return datetime.date(year, month, day).weekday()
+
+
+def monthrange(year, month):
+ """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
+ year, month."""
+ if not 1 <= month <= 12:
+ raise IllegalMonthError(month)
+ day1 = weekday(year, month, 1)
+ ndays = mdays[month] + (month == February and isleap(year))
+ return day1, ndays
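+
+# Illustrative examples (added) of the helpers above:
+#
+#   isleap(2000)         -> True   (divisible by 400)
+#   isleap(1900)         -> False  (divisible by 100 but not 400)
+#   leapdays(1970, 2000) -> 7
+#   monthrange(2021, 6)  -> (1, 30)   # June 2021 starts on a Tuesday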
+
+
+class Calendar(object):
+ """
+ Base calendar class. This class doesn't do any formatting. It simply
+ provides data to subclasses.
+ """
+
+ def __init__(self, firstweekday=0):
+ self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
+
+ def getfirstweekday(self):
+ return self._firstweekday % 7
+
+ def setfirstweekday(self, firstweekday):
+ self._firstweekday = firstweekday
+
+ firstweekday = property(getfirstweekday, setfirstweekday)
+
+ def iterweekdays(self):
+ """
+        Return an iterator for one week of weekday numbers starting with the
+        configured first one.
+ """
+ for i in xrange(self.firstweekday, self.firstweekday + 7):
+ yield i%7
+
+ def itermonthdates(self, year, month):
+ """
+ Return an iterator for one month. The iterator will yield datetime.date
+ values and will always iterate through complete weeks, so it will yield
+ dates outside the specified month.
+ """
+ date = datetime.date(year, month, 1)
+ # Go back to the beginning of the week
+ days = (date.weekday() - self.firstweekday) % 7
+ date -= datetime.timedelta(days=days)
+ oneday = datetime.timedelta(days=1)
+ while True:
+ yield date
+ date += oneday
+ if date.month != month and date.weekday() == self.firstweekday:
+ break
+
+ def itermonthdays2(self, year, month):
+ """
+ Like itermonthdates(), but will yield (day number, weekday number)
+ tuples. For days outside the specified month the day number is 0.
+ """
+ for date in self.itermonthdates(year, month):
+ if date.month != month:
+ yield (0, date.weekday())
+ else:
+ yield (date.day, date.weekday())
+
+ def itermonthdays(self, year, month):
+ """
+        Like itermonthdates(), but will yield day numbers. For days outside
+        the specified month the day number is 0.
+ """
+ for date in self.itermonthdates(year, month):
+ if date.month != month:
+ yield 0
+ else:
+ yield date.day
+
+ def monthdatescalendar(self, year, month):
+ """
+ Return a matrix (list of lists) representing a month's calendar.
+ Each row represents a week; week entries are datetime.date values.
+ """
+ dates = list(self.itermonthdates(year, month))
+ return [ dates[i:i+7] for i in xrange(0, len(dates), 7) ]
+
+ def monthdays2calendar(self, year, month):
+ """
+ Return a matrix representing a month's calendar.
+ Each row represents a week; week entries are
+ (day number, weekday number) tuples. Day numbers outside this month
+ are zero.
+ """
+ days = list(self.itermonthdays2(year, month))
+ return [ days[i:i+7] for i in xrange(0, len(days), 7) ]
+
+ def monthdayscalendar(self, year, month):
+ """
+ Return a matrix representing a month's calendar.
+ Each row represents a week; days outside this month are zero.
+ """
+ days = list(self.itermonthdays(year, month))
+ return [ days[i:i+7] for i in xrange(0, len(days), 7) ]
+
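+    # Illustrative example (added): with the default Monday-first week,
+    # June 2021 begins on a Tuesday, so the first row is padded with a zero:
+    #
+    #   Calendar().monthdayscalendar(2021, 6)[0] -> [0, 1, 2, 3, 4, 5, 6]
+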
+ def yeardatescalendar(self, year, width=3):
+ """
+ Return the data for the specified year ready for formatting. The return
+        value is a list of month rows. Each month row contains up to width months.
+ Each month contains between 4 and 6 weeks and each week contains 1-7
+ days. Days are datetime.date objects.
+ """
+ months = [
+ self.monthdatescalendar(year, i)
+ for i in xrange(January, January+12)
+ ]
+ return [months[i:i+width] for i in xrange(0, len(months), width) ]
+
+ def yeardays2calendar(self, year, width=3):
+ """
+ Return the data for the specified year ready for formatting (similar to
+ yeardatescalendar()). Entries in the week lists are
+ (day number, weekday number) tuples. Day numbers outside this month are
+ zero.
+ """
+ months = [
+ self.monthdays2calendar(year, i)
+ for i in xrange(January, January+12)
+ ]
+ return [months[i:i+width] for i in xrange(0, len(months), width) ]
+
+ def yeardayscalendar(self, year, width=3):
+ """
+ Return the data for the specified year ready for formatting (similar to
+ yeardatescalendar()). Entries in the week lists are day numbers.
+ Day numbers outside this month are zero.
+ """
+ months = [
+ self.monthdayscalendar(year, i)
+ for i in xrange(January, January+12)
+ ]
+ return [months[i:i+width] for i in xrange(0, len(months), width) ]
+
+
+class TextCalendar(Calendar):
+ """
+ Subclass of Calendar that outputs a calendar as a simple plain text
+ similar to the UNIX program cal.
+ """
+
+ def prweek(self, theweek, width):
+ """
+ Print a single week (no newline).
+ """
+ print self.week(theweek, width),
+
+ def formatday(self, day, weekday, width):
+ """
+ Returns a formatted day.
+ """
+ if day == 0:
+ s = ''
+ else:
+ s = '%2i' % day # right-align single-digit days
+ return s.center(width)
+
+ def formatweek(self, theweek, width):
+ """
+ Returns a single week in a string (no newline).
+ """
+ return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
+
+ def formatweekday(self, day, width):
+ """
+ Returns a formatted week day name.
+ """
+ if width >= 9:
+ names = day_name
+ else:
+ names = day_abbr
+ return names[day][:width].center(width)
+
+ def formatweekheader(self, width):
+ """
+ Return a header for a week.
+ """
+ return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
+
+ def formatmonthname(self, theyear, themonth, width, withyear=True):
+ """
+ Return a formatted month name.
+ """
+ s = month_name[themonth]
+ if withyear:
+ s = "%s %r" % (s, theyear)
+ return s.center(width)
+
+ def prmonth(self, theyear, themonth, w=0, l=0):
+ """
+ Print a month's calendar.
+ """
+ print self.formatmonth(theyear, themonth, w, l),
+
+ def formatmonth(self, theyear, themonth, w=0, l=0):
+ """
+ Return a month's calendar string (multi-line).
+ """
+ w = max(2, w)
+ l = max(1, l)
+ s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
+ s = s.rstrip()
+ s += '\n' * l
+ s += self.formatweekheader(w).rstrip()
+ s += '\n' * l
+ for week in self.monthdays2calendar(theyear, themonth):
+ s += self.formatweek(week, w).rstrip()
+ s += '\n' * l
+ return s
+
+ def formatyear(self, theyear, w=2, l=1, c=6, m=3):
+ """
+ Returns a year's calendar as a multi-line string.
+ """
+ w = max(2, w)
+ l = max(1, l)
+ c = max(2, c)
+ colwidth = (w + 1) * 7 - 1
+ v = []
+ a = v.append
+ a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
+ a('\n'*l)
+ header = self.formatweekheader(w)
+ for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
+ # months in this row
+ months = xrange(m*i+1, min(m*(i+1)+1, 13))
+ a('\n'*l)
+ names = (self.formatmonthname(theyear, k, colwidth, False)
+ for k in months)
+ a(formatstring(names, colwidth, c).rstrip())
+ a('\n'*l)
+ headers = (header for k in months)
+ a(formatstring(headers, colwidth, c).rstrip())
+ a('\n'*l)
+ # max number of weeks for this row
+ height = max(len(cal) for cal in row)
+ for j in xrange(height):
+ weeks = []
+ for cal in row:
+ if j >= len(cal):
+ weeks.append('')
+ else:
+ weeks.append(self.formatweek(cal[j], w))
+ a(formatstring(weeks, colwidth, c).rstrip())
+ a('\n' * l)
+ return ''.join(v)
+
+ def pryear(self, theyear, w=0, l=0, c=6, m=3):
+ """Print a year's calendar."""
+ print self.formatyear(theyear, w, l, c, m)
+
+
+class HTMLCalendar(Calendar):
+ """
+ This calendar returns complete HTML pages.
+ """
+
+ # CSS classes for the day <td>s
+ cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
+
+ def formatday(self, day, weekday):
+ """
+ Return a day as a table cell.
+ """
+ if day == 0:
+ return '<td class="noday"> </td>' # day outside month
+ else:
+ return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
+
+ def formatweek(self, theweek):
+ """
+ Return a complete week as a table row.
+ """
+ s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
+ return '<tr>%s</tr>' % s
+
+ def formatweekday(self, day):
+ """
+ Return a weekday name as a table header.
+ """
+ return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
+
+ def formatweekheader(self):
+ """
+ Return a header for a week as a table row.
+ """
+ s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
+ return '<tr>%s</tr>' % s
+
+ def formatmonthname(self, theyear, themonth, withyear=True):
+ """
+ Return a month name as a table row.
+ """
+ if withyear:
+ s = '%s %s' % (month_name[themonth], theyear)
+ else:
+ s = '%s' % month_name[themonth]
+ return '<tr><th colspan="7" class="month">%s</th></tr>' % s
+
+ def formatmonth(self, theyear, themonth, withyear=True):
+ """
+ Return a formatted month as a table.
+ """
+ v = []
+ a = v.append
+ a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
+ a('\n')
+ a(self.formatmonthname(theyear, themonth, withyear=withyear))
+ a('\n')
+ a(self.formatweekheader())
+ a('\n')
+ for week in self.monthdays2calendar(theyear, themonth):
+ a(self.formatweek(week))
+ a('\n')
+ a('</table>')
+ a('\n')
+ return ''.join(v)
+
+ def formatyear(self, theyear, width=3):
+ """
+ Return a formatted year as a table of tables.
+ """
+ v = []
+ a = v.append
+ width = max(width, 1)
+ a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
+ a('\n')
+ a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
+ for i in xrange(January, January+12, width):
+ # months in this row
+ months = xrange(i, min(i+width, 13))
+ a('<tr>')
+ for m in months:
+ a('<td>')
+ a(self.formatmonth(theyear, m, withyear=False))
+ a('</td>')
+ a('</tr>')
+ a('</table>')
+ return ''.join(v)
+
+ def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
+ """
+ Return a formatted year as a complete HTML page.
+ """
+ if encoding is None:
+ encoding = sys.getdefaultencoding()
+ v = []
+ a = v.append
+ a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
+ a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
+ a('<html>\n')
+ a('<head>\n')
+ a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
+ if css is not None:
+ a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
+        a('<title>Calendar for %d</title>\n' % theyear)
+ a('</head>\n')
+ a('<body>\n')
+ a(self.formatyear(theyear, width))
+ a('</body>\n')
+ a('</html>\n')
+ return ''.join(v).encode(encoding, "xmlcharrefreplace")
+
+
+class TimeEncoding:
+ def __init__(self, locale):
+ self.locale = locale
+
+ def __enter__(self):
+ self.oldlocale = locale.setlocale(locale.LC_TIME, self.locale)
+ return locale.getlocale(locale.LC_TIME)[1]
+
+ def __exit__(self, *args):
+ locale.setlocale(locale.LC_TIME, self.oldlocale)
+
+
+class LocaleTextCalendar(TextCalendar):
+ """
+ This class can be passed a locale name in the constructor and will return
+ month and weekday names in the specified locale. If this locale includes
+ an encoding all strings containing month and weekday names will be returned
+ as unicode.
+ """
+
+ def __init__(self, firstweekday=0, locale=None):
+ TextCalendar.__init__(self, firstweekday)
+        if locale is None:
+            # the 'locale' argument shadows the locale module imported at
+            # the top of the file, so the original locale.getdefaultlocale()
+            # call raised AttributeError; re-import under another name
+            import locale as _locale
+            locale = _locale.getdefaultlocale()
+ self.locale = locale
+
+ def formatweekday(self, day, width):
+ with TimeEncoding(self.locale) as encoding:
+ if width >= 9:
+ names = day_name
+ else:
+ names = day_abbr
+ name = names[day]
+ if encoding is not None:
+ name = name.decode(encoding)
+ return name[:width].center(width)
+
+ def formatmonthname(self, theyear, themonth, width, withyear=True):
+ with TimeEncoding(self.locale) as encoding:
+ s = month_name[themonth]
+ if encoding is not None:
+ s = s.decode(encoding)
+ if withyear:
+ s = "%s %r" % (s, theyear)
+ return s.center(width)
+
+
+class LocaleHTMLCalendar(HTMLCalendar):
+ """
+ This class can be passed a locale name in the constructor and will return
+ month and weekday names in the specified locale. If this locale includes
+ an encoding all strings containing month and weekday names will be returned
+ as unicode.
+ """
+ def __init__(self, firstweekday=0, locale=None):
+ HTMLCalendar.__init__(self, firstweekday)
+        if locale is None:
+            # as in LocaleTextCalendar, the 'locale' argument shadows the
+            # locale module; re-import it under another name
+            import locale as _locale
+            locale = _locale.getdefaultlocale()
+ self.locale = locale
+
+ def formatweekday(self, day):
+ with TimeEncoding(self.locale) as encoding:
+ s = day_abbr[day]
+ if encoding is not None:
+ s = s.decode(encoding)
+ return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
+
+ def formatmonthname(self, theyear, themonth, withyear=True):
+ with TimeEncoding(self.locale) as encoding:
+ s = month_name[themonth]
+ if encoding is not None:
+ s = s.decode(encoding)
+ if withyear:
+ s = '%s %s' % (s, theyear)
+ return '<tr><th colspan="7" class="month">%s</th></tr>' % s
+
+
+# Support for old module level interface
+c = TextCalendar()
+
+firstweekday = c.getfirstweekday
+
+def setfirstweekday(firstweekday):
+ if not MONDAY <= firstweekday <= SUNDAY:
+ raise IllegalWeekdayError(firstweekday)
+ c.firstweekday = firstweekday
+
+monthcalendar = c.monthdayscalendar
+prweek = c.prweek
+week = c.formatweek
+weekheader = c.formatweekheader
+prmonth = c.prmonth
+month = c.formatmonth
+calendar = c.formatyear
+prcal = c.pryear
+
+
+# Spacing of month columns for multi-column year calendar
+_colwidth = 7*3 - 1 # Amount printed by prweek()
+_spacing = 6 # Number of spaces between columns
+
+
+def format(cols, colwidth=_colwidth, spacing=_spacing):
+ """Prints multi-column formatting for year calendars"""
+ print formatstring(cols, colwidth, spacing)
+
+
+def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
+ """Returns a string formatted from n strings, centered within n columns."""
+    spacing *= ' '   # turn the spacing count into a string of that many spaces
+ return spacing.join(c.center(colwidth) for c in cols)
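+
+# Illustrative example (added):
+#
+#   formatstring(['a', 'b'], colwidth=3, spacing=1) -> ' a   b '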
+
+
+EPOCH = 1970
+_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
+
+
+def timegm(tuple):
+ """Unrelated but handy function to calculate Unix timestamp from GMT."""
+ year, month, day, hour, minute, second = tuple[:6]
+ days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
+ hours = days*24 + hour
+ minutes = hours*60 + minute
+ seconds = minutes*60 + second
+ return seconds
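+
+# Illustrative check (added): one day after the epoch is 86400 seconds,
+#
+#   timegm((1970, 1, 2, 0, 0, 0)) -> 86400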
+
+
+def main(args):
+ import optparse
+ parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]")
+ parser.add_option(
+ "-w", "--width",
+ dest="width", type="int", default=2,
+ help="width of date column (default 2, text only)"
+ )
+ parser.add_option(
+ "-l", "--lines",
+ dest="lines", type="int", default=1,
+ help="number of lines for each week (default 1, text only)"
+ )
+ parser.add_option(
+ "-s", "--spacing",
+ dest="spacing", type="int", default=6,
+ help="spacing between months (default 6, text only)"
+ )
+ parser.add_option(
+ "-m", "--months",
+ dest="months", type="int", default=3,
+ help="months per row (default 3, text only)"
+ )
+ parser.add_option(
+ "-c", "--css",
+ dest="css", default="calendar.css",
+ help="CSS to use for page (html only)"
+ )
+ parser.add_option(
+ "-L", "--locale",
+ dest="locale", default=None,
+        help="locale to be used for month and weekday names"
+ )
+ parser.add_option(
+ "-e", "--encoding",
+ dest="encoding", default=None,
+ help="Encoding to use for output"
+ )
+ parser.add_option(
+ "-t", "--type",
+ dest="type", default="text",
+ choices=("text", "html"),
+ help="output type (text or html)"
+ )
+
+ (options, args) = parser.parse_args(args)
+
+ if options.locale and not options.encoding:
+ parser.error("if --locale is specified --encoding is required")
+ sys.exit(1)
+
+ if options.type == "html":
+ if options.locale:
+ cal = LocaleHTMLCalendar(locale=options.locale)
+ else:
+ cal = HTMLCalendar()
+ encoding = options.encoding
+ if encoding is None:
+ encoding = sys.getdefaultencoding()
+ optdict = dict(encoding=encoding, css=options.css)
+ if len(args) == 1:
+ print cal.formatyearpage(datetime.date.today().year, **optdict)
+ elif len(args) == 2:
+ print cal.formatyearpage(int(args[1]), **optdict)
+ else:
+ parser.error("incorrect number of arguments")
+ sys.exit(1)
+ else:
+ if options.locale:
+ cal = LocaleTextCalendar(locale=options.locale)
+ else:
+ cal = TextCalendar()
+ optdict = dict(w=options.width, l=options.lines)
+ if len(args) != 3:
+ optdict["c"] = options.spacing
+ optdict["m"] = options.months
+ if len(args) == 1:
+ result = cal.formatyear(datetime.date.today().year, **optdict)
+ elif len(args) == 2:
+ result = cal.formatyear(int(args[1]), **optdict)
+ elif len(args) == 3:
+ result = cal.formatmonth(int(args[1]), int(args[2]), **optdict)
+ else:
+ parser.error("incorrect number of arguments")
+ sys.exit(1)
+ if options.encoding:
+ result = result.encode(options.encoding)
+ print result
+
+
+if __name__ == "__main__":
+ main(sys.argv)
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/cgi.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,1071 @@
+#! /usr/local/bin/python
+
+# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
+# intentionally NOT "/usr/bin/env python". On many systems
+# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
+# scripts, and /usr/local/bin is the default directory where Python is
+# installed, so /usr/bin/env would be unable to find python. Granted,
+# binary installations by Linux vendors often install Python in
+# /usr/bin. So let those vendors patch cgi.py to match their choice
+# of installation.
+
+"""Support module for CGI (Common Gateway Interface) scripts.
+
+This module defines a number of utilities for use by CGI scripts
+written in Python.
+"""
+
+# XXX Perhaps there should be a slimmed version that doesn't contain
+# all those backwards compatible and debugging classes and functions?
+
+# History
+# -------
+#
+# Michael McLay started this module. Steve Majewski changed the
+# interface to SvFormContentDict and FormContentDict. The multipart
+# parsing was inspired by code submitted by Andreas Paepcke. Guido van
+# Rossum rewrote, reformatted and documented the module and is currently
+# responsible for its maintenance.
+#
+
+__version__ = "2.6"
+
+
+# Imports
+# =======
+
+from operator import attrgetter
+import sys
+import os
+import urllib
+import mimetools
+import rfc822
+import UserDict
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
+ "SvFormContentDict", "InterpFormContentDict", "FormContent",
+ "parse", "parse_qs", "parse_qsl", "parse_multipart",
+ "parse_header", "print_exception", "print_environ",
+ "print_form", "print_directory", "print_arguments",
+ "print_environ_usage", "escape"]
+
+# Logging support
+# ===============
+
+logfile = "" # Filename to log to, if not empty
+logfp = None # File object to log to, if not None
+
+def initlog(*allargs):
+ """Write a log message, if there is a log file.
+
+ Even though this function is called initlog(), you should always
+ use log(); log is a variable that is set either to initlog
+ (initially), to dolog (once the log file has been opened), or to
+ nolog (when logging is disabled).
+
+ The first argument is a format string; the remaining arguments (if
+ any) are arguments to the % operator, so e.g.
+ log("%s: %s", "a", "b")
+ will write "a: b" to the log file, followed by a newline.
+
+ If the global logfp is not None, it should be a file object to
+ which log data is written.
+
+ If the global logfp is None, the global logfile may be a string
+ giving a filename to open, in append mode. This file should be
+ world writable!!! If the file can't be opened, logging is
+ silently disabled (since there is no safe place where we could
+ send an error message).
+
+ """
+ global logfp, log
+ if logfile and not logfp:
+ try:
+ logfp = open(logfile, "a")
+ except IOError:
+ pass
+ if not logfp:
+ log = nolog
+ else:
+ log = dolog
+ log(*allargs)
+
+def dolog(fmt, *args):
+ """Write a log message to the log file. See initlog() for docs."""
+ logfp.write(fmt%args + "\n")
+
+def nolog(*allargs):
+ """Dummy function, assigned to log when logging is disabled."""
+ pass
+
+log = initlog # The current logging function
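+
+# A sketch of the intended use: assign the module-level logfile before the
+# first call to log(); each log() call then appends one formatted line.
+#
+#     import cgi
+#     cgi.logfile = "/tmp/cgi.log"              # illustrative path
+#     cgi.log("%s: %s", "request", "started")   # appends "request: started"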
+
+
+# Parsing functions
+# =================
+
+# Maximum input we will accept when REQUEST_METHOD is POST
+# 0 ==> unlimited input
+maxlen = 0
+
+def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
+ """Parse a query in the environment or from a file (default stdin)
+
+ Arguments, all optional:
+
+ fp : file pointer; default: sys.stdin
+
+ environ : environment dictionary; default: os.environ
+
+ keep_blank_values: flag indicating whether blank values in
+ URL encoded forms should be treated as blank strings.
+ A true value indicates that blanks should be retained as
+ blank strings. The default false value indicates that
+ blank values are to be ignored and treated as if they were
+ not included.
+
+ strict_parsing: flag indicating what to do with parsing errors.
+ If false (the default), errors are silently ignored.
+ If true, errors raise a ValueError exception.
+ """
+ if fp is None:
+ fp = sys.stdin
+ if not 'REQUEST_METHOD' in environ:
+ environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
+ if environ['REQUEST_METHOD'] == 'POST':
+ ctype, pdict = parse_header(environ['CONTENT_TYPE'])
+ if ctype == 'multipart/form-data':
+ return parse_multipart(fp, pdict)
+ elif ctype == 'application/x-www-form-urlencoded':
+ clength = int(environ['CONTENT_LENGTH'])
+ if maxlen and clength > maxlen:
+ raise ValueError, 'Maximum content length exceeded'
+ qs = fp.read(clength)
+ else:
+ qs = '' # Unknown content-type
+ if 'QUERY_STRING' in environ:
+ if qs: qs = qs + '&'
+ qs = qs + environ['QUERY_STRING']
+ elif sys.argv[1:]:
+ if qs: qs = qs + '&'
+ qs = qs + sys.argv[1]
+ environ['QUERY_STRING'] = qs # XXX Shouldn't, really
+ elif 'QUERY_STRING' in environ:
+ qs = environ['QUERY_STRING']
+ else:
+ if sys.argv[1:]:
+ qs = sys.argv[1]
+ else:
+ qs = ""
+ environ['QUERY_STRING'] = qs # XXX Shouldn't, really
+ return parse_qs(qs, keep_blank_values, strict_parsing)
+
+
+def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
+ """Parse a query given as a string argument.
+
+ Arguments:
+
+ qs: URL-encoded query string to be parsed
+
+ keep_blank_values: flag indicating whether blank values in
+ URL encoded queries should be treated as blank strings.
+ A true value indicates that blanks should be retained as
+ blank strings. The default false value indicates that
+ blank values are to be ignored and treated as if they were
+ not included.
+
+ strict_parsing: flag indicating what to do with parsing errors.
+ If false (the default), errors are silently ignored.
+ If true, errors raise a ValueError exception.
+ """
+ dict = {}
+ for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
+ if name in dict:
+ dict[name].append(value)
+ else:
+ dict[name] = [value]
+ return dict
+
+def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
+ """Parse a query given as a string argument.
+
+ Arguments:
+
+ qs: URL-encoded query string to be parsed
+
+ keep_blank_values: flag indicating whether blank values in
+ URL encoded queries should be treated as blank strings. A
+ true value indicates that blanks should be retained as blank
+ strings. The default false value indicates that blank values
+ are to be ignored and treated as if they were not included.
+
+ strict_parsing: flag indicating what to do with parsing errors. If
+ false (the default), errors are silently ignored. If true,
+ errors raise a ValueError exception.
+
+ Returns a list, as G-d intended.
+ """
+ pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+ r = []
+ for name_value in pairs:
+ if not name_value and not strict_parsing:
+ continue
+ nv = name_value.split('=', 1)
+ if len(nv) != 2:
+ if strict_parsing:
+ raise ValueError, "bad query field: %r" % (name_value,)
+ # Handle case of a control-name with no equal sign
+ if keep_blank_values:
+ nv.append('')
+ else:
+ continue
+ if len(nv[1]) or keep_blank_values:
+ name = urllib.unquote(nv[0].replace('+', ' '))
+ value = urllib.unquote(nv[1].replace('+', ' '))
+ r.append((name, value))
+
+ return r
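+
+# For example, the two parsers differ only in the shape of the result;
+# in both, '+' decodes to a space and %XX escapes are unquoted:
+#
+#     parse_qsl("a=1&a=2&b=3") == [('a', '1'), ('a', '2'), ('b', '3')]
+#     parse_qs("a=1&a=2&b=3") == {'a': ['1', '2'], 'b': ['3']}
+#     parse_qsl("name=J+Doe%21") == [('name', 'J Doe!')]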
+
+
+def parse_multipart(fp, pdict):
+ """Parse multipart input.
+
+ Arguments:
+ fp : input file
+ pdict: dictionary containing other parameters of content-type header
+
+ Returns a dictionary just like parse_qs(): keys are the field names, each
+ value is a list of values for that field. This is easy to use but not
+ much good if you are expecting megabytes to be uploaded -- in that case,
+ use the FieldStorage class instead which is much more flexible. Note
+ that content-type is the raw, unparsed contents of the content-type
+ header.
+
+ XXX This does not parse nested multipart parts -- use FieldStorage for
+ that.
+
+ XXX This should really be subsumed by FieldStorage altogether -- no
+ point in having two implementations of the same parsing algorithm.
+ Also, FieldStorage protects itself better against certain DoS attacks
+ by limiting the size of the data read in one chunk. The API here
+ does not support that kind of protection. This also affects parse()
+ since it can call parse_multipart().
+
+ """
+ boundary = ""
+ if 'boundary' in pdict:
+ boundary = pdict['boundary']
+ if not valid_boundary(boundary):
+ raise ValueError, ('Invalid boundary in multipart form: %r'
+ % (boundary,))
+
+ nextpart = "--" + boundary
+ lastpart = "--" + boundary + "--"
+ partdict = {}
+ terminator = ""
+
+ while terminator != lastpart:
+ bytes = -1
+ data = None
+ if terminator:
+ # At start of next part. Read headers first.
+ headers = mimetools.Message(fp)
+ clength = headers.getheader('content-length')
+ if clength:
+ try:
+ bytes = int(clength)
+ except ValueError:
+ pass
+ if bytes > 0:
+ if maxlen and bytes > maxlen:
+ raise ValueError, 'Maximum content length exceeded'
+ data = fp.read(bytes)
+ else:
+ data = ""
+ # Read lines until end of part.
+ lines = []
+ while 1:
+ line = fp.readline()
+ if not line:
+ terminator = lastpart # End outer loop
+ break
+ if line[:2] == "--":
+ terminator = line.strip()
+ if terminator in (nextpart, lastpart):
+ break
+ lines.append(line)
+ # Done with part.
+ if data is None:
+ continue
+ if bytes < 0:
+ if lines:
+ # Strip final line terminator
+ line = lines[-1]
+ if line[-2:] == "\r\n":
+ line = line[:-2]
+ elif line[-1:] == "\n":
+ line = line[:-1]
+ lines[-1] = line
+ data = "".join(lines)
+ line = headers['content-disposition']
+ if not line:
+ continue
+ key, params = parse_header(line)
+ if key != 'form-data':
+ continue
+ if 'name' in params:
+ name = params['name']
+ else:
+ continue
+ if name in partdict:
+ partdict[name].append(data)
+ else:
+ partdict[name] = [data]
+
+ return partdict
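+
+# For example, with pdict == {'boundary': 'BOUND'} and a body of
+#
+#     --BOUND
+#     Content-Disposition: form-data; name="field"
+#
+#     value
+#     --BOUND--
+#
+# parse_multipart(fp, pdict) returns {'field': ['value']}.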
+
+
+def parse_header(line):
+ """Parse a Content-type like header.
+
+ Return the main content-type and a dictionary of options.
+
+ """
+ plist = [x.strip() for x in line.split(';')]
+ key = plist.pop(0).lower()
+ pdict = {}
+ for p in plist:
+ i = p.find('=')
+ if i >= 0:
+ name = p[:i].strip().lower()
+ value = p[i+1:].strip()
+ if len(value) >= 2 and value[0] == value[-1] == '"':
+ value = value[1:-1]
+ value = value.replace('\\\\', '\\').replace('\\"', '"')
+ pdict[name] = value
+ return key, pdict
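+
+# For example:
+#
+#     parse_header('text/html; charset="utf-8"')
+#     == ('text/html', {'charset': 'utf-8'})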
+
+
+# Classes for field storage
+# =========================
+
+class MiniFieldStorage:
+
+ """Like FieldStorage, for use when no file uploads are possible."""
+
+ # Dummy attributes
+ filename = None
+ list = None
+ type = None
+ file = None
+ type_options = {}
+ disposition = None
+ disposition_options = {}
+ headers = {}
+
+ def __init__(self, name, value):
+ """Constructor from field name and value."""
+ self.name = name
+ self.value = value
+ # self.file = StringIO(value)
+
+ def __repr__(self):
+ """Return printable representation."""
+ return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
+
+
+class FieldStorage:
+
+ """Store a sequence of fields, reading multipart/form-data.
+
+ This class provides naming, typing, files stored on disk, and
+ more. At the top level, it is accessible like a dictionary, whose
+ keys are the field names. (Note: None can occur as a field name.)
+ The items are either a Python list (if there are multiple values) or
+ another FieldStorage or MiniFieldStorage object. If it's a single
+ object, it has the following attributes:
+
+ name: the field name, if specified; otherwise None
+
+ filename: the filename, if specified; otherwise None; this is the
+ client side filename, *not* the file name on which it is
+ stored (that's a temporary file you don't deal with)
+
+ value: the value as a *string*; for file uploads, this
+ transparently reads the file every time you request the value
+
+ file: the file(-like) object from which you can read the data;
+ None if the data is stored as a simple string
+
+ type: the content-type, or None if not specified
+
+ type_options: dictionary of options specified on the content-type
+ line
+
+ disposition: content-disposition, or None if not specified
+
+ disposition_options: dictionary of corresponding options
+
+ headers: a dictionary(-like) object (sometimes rfc822.Message or a
+ subclass thereof) containing *all* headers
+
+ The class is subclassable, mostly for the purpose of overriding
+ the make_file() method, which is called internally to come up with
+ a file open for reading and writing. This makes it possible to
+ override the default choice of storing all files in a temporary
+ directory and unlinking them as soon as they have been opened.
+
+ """
+
+ def __init__(self, fp=None, headers=None, outerboundary="",
+ environ=os.environ, keep_blank_values=0, strict_parsing=0):
+ """Constructor. Read multipart/* until last part.
+
+ Arguments, all optional:
+
+ fp : file pointer; default: sys.stdin
+ (not used when the request method is GET)
+
+ headers : header dictionary-like object; default:
+ taken from environ as per CGI spec
+
+ outerboundary : terminating multipart boundary
+ (for internal use only)
+
+ environ : environment dictionary; default: os.environ
+
+ keep_blank_values: flag indicating whether blank values in
+ URL encoded forms should be treated as blank strings.
+ A true value indicates that blanks should be retained as
+ blank strings. The default false value indicates that
+ blank values are to be ignored and treated as if they were
+ not included.
+
+ strict_parsing: flag indicating what to do with parsing errors.
+ If false (the default), errors are silently ignored.
+ If true, errors raise a ValueError exception.
+
+ """
+ method = 'GET'
+ self.keep_blank_values = keep_blank_values
+ self.strict_parsing = strict_parsing
+ if 'REQUEST_METHOD' in environ:
+ method = environ['REQUEST_METHOD'].upper()
+ if method == 'GET' or method == 'HEAD':
+ if 'QUERY_STRING' in environ:
+ qs = environ['QUERY_STRING']
+ elif sys.argv[1:]:
+ qs = sys.argv[1]
+ else:
+ qs = ""
+ fp = StringIO(qs)
+ if headers is None:
+ headers = {'content-type':
+ "application/x-www-form-urlencoded"}
+ if headers is None:
+ headers = {}
+ if method == 'POST':
+ # Set default content-type for POST to what's traditional
+ headers['content-type'] = "application/x-www-form-urlencoded"
+ if 'CONTENT_TYPE' in environ:
+ headers['content-type'] = environ['CONTENT_TYPE']
+ if 'CONTENT_LENGTH' in environ:
+ headers['content-length'] = environ['CONTENT_LENGTH']
+ self.fp = fp or sys.stdin
+ self.headers = headers
+ self.outerboundary = outerboundary
+
+ # Process content-disposition header
+ cdisp, pdict = "", {}
+ if 'content-disposition' in self.headers:
+ cdisp, pdict = parse_header(self.headers['content-disposition'])
+ self.disposition = cdisp
+ self.disposition_options = pdict
+ self.name = None
+ if 'name' in pdict:
+ self.name = pdict['name']
+ self.filename = None
+ if 'filename' in pdict:
+ self.filename = pdict['filename']
+
+ # Process content-type header
+ #
+ # Honor any existing content-type header. But if there is no
+ # content-type header, use some sensible defaults. Assume
+ # outerboundary is "" at the outer level, but something non-false
+ # inside a multi-part. The default for an inner part is text/plain,
+ # but for an outer part it should be urlencoded. This should catch
+ # bogus clients which erroneously forget to include a content-type
+ # header.
+ #
+ # See below for what we do if there does exist a content-type header,
+ # but it happens to be something we don't understand.
+ if 'content-type' in self.headers:
+ ctype, pdict = parse_header(self.headers['content-type'])
+ elif self.outerboundary or method != 'POST':
+ ctype, pdict = "text/plain", {}
+ else:
+ ctype, pdict = 'application/x-www-form-urlencoded', {}
+ self.type = ctype
+ self.type_options = pdict
+ self.innerboundary = ""
+ if 'boundary' in pdict:
+ self.innerboundary = pdict['boundary']
+ clen = -1
+ if 'content-length' in self.headers:
+ try:
+ clen = int(self.headers['content-length'])
+ except ValueError:
+ pass
+ if maxlen and clen > maxlen:
+ raise ValueError, 'Maximum content length exceeded'
+ self.length = clen
+
+ self.list = self.file = None
+ self.done = 0
+ if ctype == 'application/x-www-form-urlencoded':
+ self.read_urlencoded()
+ elif ctype[:10] == 'multipart/':
+ self.read_multi(environ, keep_blank_values, strict_parsing)
+ else:
+ self.read_single()
+
+ def __repr__(self):
+ """Return a printable representation."""
+ return "FieldStorage(%r, %r, %r)" % (
+ self.name, self.filename, self.value)
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def __getattr__(self, name):
+ if name != 'value':
+ raise AttributeError, name
+ if self.file:
+ self.file.seek(0)
+ value = self.file.read()
+ self.file.seek(0)
+ elif self.list is not None:
+ value = self.list
+ else:
+ value = None
+ return value
+
+ def __getitem__(self, key):
+ """Dictionary style indexing."""
+ if self.list is None:
+ raise TypeError, "not indexable"
+ found = []
+ for item in self.list:
+ if item.name == key: found.append(item)
+ if not found:
+ raise KeyError, key
+ if len(found) == 1:
+ return found[0]
+ else:
+ return found
+
+ def getvalue(self, key, default=None):
+ """Dictionary style get() method, including 'value' lookup."""
+ if key in self:
+ value = self[key]
+ if type(value) is type([]):
+ return map(attrgetter('value'), value)
+ else:
+ return value.value
+ else:
+ return default
+
+ def getfirst(self, key, default=None):
+ """ Return the first value received."""
+ if key in self:
+ value = self[key]
+ if type(value) is type([]):
+ return value[0].value
+ else:
+ return value.value
+ else:
+ return default
+
+ def getlist(self, key):
+ """ Return list of received values."""
+ if key in self:
+ value = self[key]
+ if type(value) is type([]):
+ return map(attrgetter('value'), value)
+ else:
+ return [value.value]
+ else:
+ return []
+
+ def keys(self):
+ """Dictionary style keys() method."""
+ if self.list is None:
+ raise TypeError, "not indexable"
+ keys = []
+ for item in self.list:
+ if item.name not in keys: keys.append(item.name)
+ return keys
+
+ def has_key(self, key):
+ """Dictionary style has_key() method."""
+ if self.list is None:
+ raise TypeError, "not indexable"
+ for item in self.list:
+ if item.name == key: return True
+ return False
+
+ def __contains__(self, key):
+ """Dictionary style __contains__ method."""
+ if self.list is None:
+ raise TypeError, "not indexable"
+ for item in self.list:
+ if item.name == key: return True
+ return False
+
+ def __len__(self):
+ """Dictionary style len(x) support."""
+ return len(self.keys())
+
+ def read_urlencoded(self):
+ """Internal: read data in query string format."""
+ qs = self.fp.read(self.length)
+ self.list = list = []
+ for key, value in parse_qsl(qs, self.keep_blank_values,
+ self.strict_parsing):
+ list.append(MiniFieldStorage(key, value))
+ self.skip_lines()
+
+ FieldStorageClass = None
+
+ def read_multi(self, environ, keep_blank_values, strict_parsing):
+ """Internal: read a part that is itself multipart."""
+ ib = self.innerboundary
+ if not valid_boundary(ib):
+ raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
+ self.list = []
+ klass = self.FieldStorageClass or self.__class__
+ part = klass(self.fp, {}, ib,
+ environ, keep_blank_values, strict_parsing)
+ # Throw first part away
+ while not part.done:
+ headers = rfc822.Message(self.fp)
+ part = klass(self.fp, headers, ib,
+ environ, keep_blank_values, strict_parsing)
+ self.list.append(part)
+ self.skip_lines()
+
+ def read_single(self):
+ """Internal: read an atomic part."""
+ if self.length >= 0:
+ self.read_binary()
+ self.skip_lines()
+ else:
+ self.read_lines()
+ self.file.seek(0)
+
+ bufsize = 8*1024 # I/O buffering size for copy to file
+
+ def read_binary(self):
+ """Internal: read binary data."""
+ self.file = self.make_file('b')
+ todo = self.length
+ if todo >= 0:
+ while todo > 0:
+ data = self.fp.read(min(todo, self.bufsize))
+ if not data:
+ self.done = -1
+ break
+ self.file.write(data)
+ todo = todo - len(data)
+
+ def read_lines(self):
+ """Internal: read lines until EOF or outerboundary."""
+ self.file = self.__file = StringIO()
+ if self.outerboundary:
+ self.read_lines_to_outerboundary()
+ else:
+ self.read_lines_to_eof()
+
+ def __write(self, line):
+ if self.__file is not None:
+ if self.__file.tell() + len(line) > 1000:
+ self.file = self.make_file('')
+ self.file.write(self.__file.getvalue())
+ self.__file = None
+ self.file.write(line)
+
+ def read_lines_to_eof(self):
+ """Internal: read lines until EOF."""
+ while 1:
+ line = self.fp.readline(1<<16)
+ if not line:
+ self.done = -1
+ break
+ self.__write(line)
+
+ def read_lines_to_outerboundary(self):
+ """Internal: read lines until outerboundary."""
+ next = "--" + self.outerboundary
+ last = next + "--"
+ delim = ""
+ last_line_lfend = True
+ while 1:
+ line = self.fp.readline(1<<16)
+ if not line:
+ self.done = -1
+ break
+ if line[:2] == "--" and last_line_lfend:
+ strippedline = line.strip()
+ if strippedline == next:
+ break
+ if strippedline == last:
+ self.done = 1
+ break
+ odelim = delim
+ if line[-2:] == "\r\n":
+ delim = "\r\n"
+ line = line[:-2]
+ last_line_lfend = True
+ elif line[-1] == "\n":
+ delim = "\n"
+ line = line[:-1]
+ last_line_lfend = True
+ else:
+ delim = ""
+ last_line_lfend = False
+ self.__write(odelim + line)
+
+ def skip_lines(self):
+ """Internal: skip lines until outer boundary if defined."""
+ if not self.outerboundary or self.done:
+ return
+ next = "--" + self.outerboundary
+ last = next + "--"
+ last_line_lfend = True
+ while 1:
+ line = self.fp.readline(1<<16)
+ if not line:
+ self.done = -1
+ break
+ if line[:2] == "--" and last_line_lfend:
+ strippedline = line.strip()
+ if strippedline == next:
+ break
+ if strippedline == last:
+ self.done = 1
+ break
+ last_line_lfend = line.endswith('\n')
+
+ def make_file(self, binary=None):
+ """Overridable: return a readable & writable file.
+
+ The file will be used as follows:
+ - data is written to it
+ - seek(0)
+ - data is read from it
+
+ The 'binary' argument is unused -- the file is always opened
+ in binary mode.
+
+ This version opens a temporary file for reading and writing,
+ and immediately deletes (unlinks) it. The trick (on Unix!) is
+ that the file can still be used, but it can't be opened by
+ another process, and it will automatically be deleted when it
+ is closed or when the current process terminates.
+
+ If you want a more permanent file, you derive a class which
+ overrides this method. If you want a visible temporary file
+ that is nevertheless automatically deleted when the script
+ terminates, try defining a __del__ method in a derived class
+ which unlinks the temporary files you have created.
+
+ """
+ import tempfile
+ return tempfile.TemporaryFile("w+b")
+
+
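+# A minimal sketch of a CGI script built on this class; the "name" field
+# and the greeting are illustrative:
+#
+#     import cgi
+#     form = cgi.FieldStorage()
+#     who = form.getfirst("name", "world")
+#     print "Content-type: text/html"
+#     print
+#     print "<p>Hello, %s</p>" % cgi.escape(who)
+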
+
+# Backwards Compatibility Classes
+# ===============================
+
+class FormContentDict(UserDict.UserDict):
+ """Form content as dictionary with a list of values per field.
+
+ form = FormContentDict()
+
+ form[key] -> [value, value, ...]
+ key in form -> Boolean
+ form.keys() -> [key, key, ...]
+ form.values() -> [[val, val, ...], [val, val, ...], ...]
+ form.items() -> [(key, [val, val, ...]), (key, [val, val, ...]), ...]
+ form.dict == {key: [val, val, ...], ...}
+
+ """
+ def __init__(self, environ=os.environ):
+ self.dict = self.data = parse(environ=environ)
+ self.query_string = environ['QUERY_STRING']
+
+
+class SvFormContentDict(FormContentDict):
+ """Form content as dictionary expecting a single value per field.
+
+ If you only expect a single value for each field, then form[key]
+ will return that single value. It will raise an IndexError if
+ that expectation is not true. If you expect a field to have
+ possibly multiple values, then you can use form.getlist(key) to
+ get all of the values. values() and items() are a compromise:
+ they return single strings where there is a single value, and
+ lists of strings otherwise.
+
+ """
+ def __getitem__(self, key):
+ if len(self.dict[key]) > 1:
+ raise IndexError, 'expecting a single value'
+ return self.dict[key][0]
+ def getlist(self, key):
+ return self.dict[key]
+ def values(self):
+ result = []
+ for value in self.dict.values():
+ if len(value) == 1:
+ result.append(value[0])
+ else: result.append(value)
+ return result
+ def items(self):
+ result = []
+ for key, value in self.dict.items():
+ if len(value) == 1:
+ result.append((key, value[0]))
+ else: result.append((key, value))
+ return result
+
+
+class InterpFormContentDict(SvFormContentDict):
+ """This class is present for backwards compatibility only."""
+ def __getitem__(self, key):
+ v = SvFormContentDict.__getitem__(self, key)
+ if v[0] in '0123456789+-.':
+ try: return int(v)
+ except ValueError:
+ try: return float(v)
+ except ValueError: pass
+ return v.strip()
+ def values(self):
+ result = []
+ for key in self.keys():
+ try:
+ result.append(self[key])
+ except IndexError:
+ result.append(self.dict[key])
+ return result
+ def items(self):
+ result = []
+ for key in self.keys():
+ try:
+ result.append((key, self[key]))
+ except IndexError:
+ result.append((key, self.dict[key]))
+ return result
+
+
+class FormContent(FormContentDict):
+ """This class is present for backwards compatibility only."""
+ def values(self, key):
+ if key in self.dict :return self.dict[key]
+ else: return None
+ def indexed_value(self, key, location):
+ if key in self.dict:
+ if len(self.dict[key]) > location:
+ return self.dict[key][location]
+ else: return None
+ else: return None
+ def value(self, key):
+ if key in self.dict: return self.dict[key][0]
+ else: return None
+ def length(self, key):
+ return len(self.dict[key])
+ def stripped(self, key):
+ if key in self.dict: return self.dict[key][0].strip()
+ else: return None
+ def pars(self):
+ return self.dict
+
+
+# Test/debug code
+# ===============
+
+def test(environ=os.environ):
+ """Robust test CGI script, usable as main program.
+
+ Write minimal HTTP headers and dump all information provided to
+ the script in HTML form.
+
+ """
+ print "Content-type: text/html"
+ sys.stderr = sys.stdout
+ try:
+ form = FieldStorage() # Replace with other classes to test those
+ print_directory()
+ print_arguments()
+ print_form(form)
+ print_environ(environ)
+ print_environ_usage()
+ def f():
+ exec "testing print_exception() -- <I>italics?</I>"
+ def g(f=f):
+ f()
+ print "<H3>What follows is a test, not an actual exception:</H3>"
+ g()
+ except:
+ print_exception()
+
+ print "<H1>Second try with a small maxlen...</H1>"
+
+ global maxlen
+ maxlen = 50
+ try:
+ form = FieldStorage() # Replace with other classes to test those
+ print_directory()
+ print_arguments()
+ print_form(form)
+ print_environ(environ)
+ except:
+ print_exception()
+
+def print_exception(type=None, value=None, tb=None, limit=None):
+ if type is None:
+ type, value, tb = sys.exc_info()
+ import traceback
+ print "<H3>Traceback (most recent call last):</H3>"
+ list = traceback.format_tb(tb, limit) + \
+ traceback.format_exception_only(type, value)
+ print "<PRE>%s<B>%s</B></PRE>" % (
+ escape("".join(list[:-1])),
+ escape(list[-1]),
+ )
+ del tb
+
+def print_environ(environ=os.environ):
+ """Dump the shell environment as HTML."""
+ keys = environ.keys()
+ keys.sort()
+ print "<H3>Shell Environment:</H3>"
+ print "<DL>"
+ for key in keys:
+ print "<DT>", escape(key), "<DD>", escape(environ[key])
+ print "</DL>"
+
+def print_form(form):
+ """Dump the contents of a form as HTML."""
+ keys = form.keys()
+ keys.sort()
+ print "<H3>Form Contents:</H3>"
+ if not keys:
+ print "<P>No form fields."
+ print "<DL>"
+ for key in keys:
+ print "<DT>" + escape(key) + ":",
+ value = form[key]
+ print "<i>" + escape(repr(type(value))) + "</i>"
+ print "<DD>" + escape(repr(value))
+ print "</DL>"
+
+def print_directory():
+ """Dump the current directory as HTML."""
+ print "<H3>Current Working Directory:</H3>"
+ try:
+ pwd = os.getcwd()
+ except os.error, msg:
+ print "os.error:", escape(str(msg))
+ else:
+ print escape(pwd)
+
+def print_arguments():
+ print "<H3>Command Line Arguments:</H3>"
+ print sys.argv
+
+def print_environ_usage():
+ """Dump a list of environment variables used by CGI as HTML."""
+ print """
+<H3>These environment variables could have been set:</H3>
+<UL>
+<LI>AUTH_TYPE
+<LI>CONTENT_LENGTH
+<LI>CONTENT_TYPE
+<LI>DATE_GMT
+<LI>DATE_LOCAL
+<LI>DOCUMENT_NAME
+<LI>DOCUMENT_ROOT
+<LI>DOCUMENT_URI
+<LI>GATEWAY_INTERFACE
+<LI>LAST_MODIFIED
+<LI>PATH
+<LI>PATH_INFO
+<LI>PATH_TRANSLATED
+<LI>QUERY_STRING
+<LI>REMOTE_ADDR
+<LI>REMOTE_HOST
+<LI>REMOTE_IDENT
+<LI>REMOTE_USER
+<LI>REQUEST_METHOD
+<LI>SCRIPT_NAME
+<LI>SERVER_NAME
+<LI>SERVER_PORT
+<LI>SERVER_PROTOCOL
+<LI>SERVER_ROOT
+<LI>SERVER_SOFTWARE
+</UL>
+In addition, HTTP headers sent by the server may be passed in the
+environment as well. Here are some common variable names:
+<UL>
+<LI>HTTP_ACCEPT
+<LI>HTTP_CONNECTION
+<LI>HTTP_HOST
+<LI>HTTP_PRAGMA
+<LI>HTTP_REFERER
+<LI>HTTP_USER_AGENT
+</UL>
+"""
+
+
+# Utilities
+# =========
+
+def escape(s, quote=None):
+ '''Replace special characters "&", "<" and ">" to HTML-safe sequences.
+ If the optional flag quote is true, the quotation mark character (")
+ is also translated.'''
+ s = s.replace("&", "&") # Must be done first!
+ s = s.replace("<", "<")
+ s = s.replace(">", ">")
+ if quote:
+ s = s.replace('"', """)
+ return s
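+
+# For example, escape('<a href="x">&</a>', quote=True) returns
+# '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'.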
+
+def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
+ import re
+ return re.match(_vb_pattern, s)
+
+# Invoke mainline
+# ===============
+
+# Call test() when this file is run as a script (not imported as a module)
+if __name__ == '__main__':
+ test()
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/cgitb.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,317 @@
+"""More comprehensive traceback formatting for Python scripts.
+
+To enable this module, do:
+
+ import cgitb; cgitb.enable()
+
+at the top of your script. The optional arguments to enable() are:
+
+ display - if true, tracebacks are displayed in the web browser
+ logdir - if set, tracebacks are written to files in this directory
+ context - number of lines of source code to show for each stack frame
+ format - 'text' or 'html' controls the output format
+
+By default, tracebacks are displayed but not saved, the context is 5 lines
+and the output format is 'html' (for backwards compatibility with the
+original use of this module)
+
+Alternatively, if you have caught an exception and want cgitb to display it
+for you, call cgitb.handler(). The optional argument to handler() is a
+3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
+The default handler displays output as HTML.
+"""
+
+__author__ = 'Ka-Ping Yee'
+
+__version__ = '$Revision: 39758 $'
+
+import sys
+
+def reset():
+ """Return a string that resets the CGI and browser to a known state."""
+ return '''<!--: spam
+Content-Type: text/html
+
+<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
+<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
+</font> </font> </font> </script> </object> </blockquote> </pre>
+</table> </table> </table> </table> </table> </font> </font> </font>'''
+
+__UNDEF__ = [] # a special sentinel object
+def small(text):
+ if text:
+ return '<small>' + text + '</small>'
+ else:
+ return ''
+
+def strong(text):
+ if text:
+ return '<strong>' + text + '</strong>'
+ else:
+ return ''
+
+def grey(text):
+ if text:
+ return '<font color="#909090">' + text + '</font>'
+ else:
+ return ''
+
+def lookup(name, frame, locals):
+ """Find the value for a given name in the given environment."""
+ if name in locals:
+ return 'local', locals[name]
+ if name in frame.f_globals:
+ return 'global', frame.f_globals[name]
+ if '__builtins__' in frame.f_globals:
+ builtins = frame.f_globals['__builtins__']
+ if type(builtins) is type({}):
+ if name in builtins:
+ return 'builtin', builtins[name]
+ else:
+ if hasattr(builtins, name):
+ return 'builtin', getattr(builtins, name)
+ return None, __UNDEF__
+
+def scanvars(reader, frame, locals):
+ """Scan one logical line of Python and look up values of variables used."""
+ import tokenize, keyword
+ vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
+ for ttype, token, start, end, line in tokenize.generate_tokens(reader):
+ if ttype == tokenize.NEWLINE: break
+ if ttype == tokenize.NAME and token not in keyword.kwlist:
+ if lasttoken == '.':
+ if parent is not __UNDEF__:
+ value = getattr(parent, token, __UNDEF__)
+ vars.append((prefix + token, prefix, value))
+ else:
+ where, value = lookup(token, frame, locals)
+ vars.append((token, where, value))
+ elif token == '.':
+ prefix += lasttoken + '.'
+ parent = value
+ else:
+ parent, prefix = None, ''
+ lasttoken = token
+ return vars
+
+def html((etype, evalue, etb), context=5):
+ """Return a nice HTML document describing a given traceback."""
+ import os, types, time, traceback, linecache, inspect, pydoc
+
+ if type(etype) is types.ClassType:
+ etype = etype.__name__
+ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+ date = time.ctime(time.time())
+ head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
+ '<big><big>%s</big></big>' %
+ strong(pydoc.html.escape(str(etype))),
+ '#ffffff', '#6622aa', pyver + '<br>' + date) + '''
+<p>A problem occurred in a Python script. Here is the sequence of
+function calls leading up to the error, in the order they occurred.</p>'''
+
+ indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
+ frames = []
+ records = inspect.getinnerframes(etb, context)
+ for frame, file, lnum, func, lines, index in records:
+ if file:
+ file = os.path.abspath(file)
+ link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
+ else:
+ file = link = '?'
+ args, varargs, varkw, locals = inspect.getargvalues(frame)
+ call = ''
+ if func != '?':
+ call = 'in ' + strong(func) + \
+ inspect.formatargvalues(args, varargs, varkw, locals,
+ formatvalue=lambda value: '=' + pydoc.html.repr(value))
+
+ highlight = {}
+ def reader(lnum=[lnum]):
+ highlight[lnum[0]] = 1
+ try: return linecache.getline(file, lnum[0])
+ finally: lnum[0] += 1
+ vars = scanvars(reader, frame, locals)
+
+ rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
+ ('<big>&nbsp;</big>', link, call)]
+ if index is not None:
+ i = lnum - index
+ for line in lines:
+ num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
+ line = '<tt>%s%s</tt>' % (num, pydoc.html.preformat(line))
+ if i in highlight:
+ rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
+ else:
+ rows.append('<tr><td>%s</td></tr>' % grey(line))
+ i += 1
+
+ done, dump = {}, []
+ for name, where, value in vars:
+ if name in done: continue
+ done[name] = 1
+ if value is not __UNDEF__:
+ if where in ('global', 'builtin'):
+ name = ('<em>%s</em> ' % where) + strong(name)
+ elif where == 'local':
+ name = strong(name)
+ else:
+ name = where + strong(name.split('.')[-1])
+ dump.append('%s = %s' % (name, pydoc.html.repr(value)))
+ else:
+ dump.append(name + ' <em>undefined</em>')
+
+ rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
+ frames.append('''
+<table width="100%%" cellspacing=0 cellpadding=0 border=0>
+%s</table>''' % '\n'.join(rows))
+
+ exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
+ pydoc.html.escape(str(evalue)))]
+ if type(evalue) is types.InstanceType:
+ for name in dir(evalue):
+ if name[:1] == '_': continue
+ value = pydoc.html.repr(getattr(evalue, name))
+ exception.append('\n<br>%s%s =\n%s' % (indent, name, value))
+
+ import traceback
+ return head + ''.join(frames) + ''.join(exception) + '''
+
+
+<!-- The above is a description of an error in a Python program, formatted
+ for a Web browser because the 'cgitb' module was enabled. In case you
+ are not reading this in a Web browser, here is the original traceback:
+
+%s
+-->
+''' % ''.join(traceback.format_exception(etype, evalue, etb))
+
+def text((etype, evalue, etb), context=5):
+ """Return a plain text document describing a given traceback."""
+ import os, types, time, traceback, linecache, inspect, pydoc
+
+ if type(etype) is types.ClassType:
+ etype = etype.__name__
+ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
+ date = time.ctime(time.time())
+ head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
+A problem occurred in a Python script. Here is the sequence of
+function calls leading up to the error, in the order they occurred.
+'''
+
+ frames = []
+ records = inspect.getinnerframes(etb, context)
+ for frame, file, lnum, func, lines, index in records:
+ file = file and os.path.abspath(file) or '?'
+ args, varargs, varkw, locals = inspect.getargvalues(frame)
+ call = ''
+ if func != '?':
+ call = 'in ' + func + \
+ inspect.formatargvalues(args, varargs, varkw, locals,
+ formatvalue=lambda value: '=' + pydoc.text.repr(value))
+
+ highlight = {}
+ def reader(lnum=[lnum]):
+ highlight[lnum[0]] = 1
+ try: return linecache.getline(file, lnum[0])
+ finally: lnum[0] += 1
+ vars = scanvars(reader, frame, locals)
+
+ rows = [' %s %s' % (file, call)]
+ if index is not None:
+ i = lnum - index
+ for line in lines:
+ num = '%5d ' % i
+ rows.append(num+line.rstrip())
+ i += 1
+
+ done, dump = {}, []
+ for name, where, value in vars:
+ if name in done: continue
+ done[name] = 1
+ if value is not __UNDEF__:
+ if where == 'global': name = 'global ' + name
+ elif where != 'local': name = where + name.split('.')[-1]
+ dump.append('%s = %s' % (name, pydoc.text.repr(value)))
+ else:
+ dump.append(name + ' undefined')
+
+ rows.append('\n'.join(dump))
+ frames.append('\n%s\n' % '\n'.join(rows))
+
+ exception = ['%s: %s' % (str(etype), str(evalue))]
+ if type(evalue) is types.InstanceType:
+ for name in dir(evalue):
+ value = pydoc.text.repr(getattr(evalue, name))
+ exception.append('\n%s%s = %s' % (" "*4, name, value))
+
+ import traceback
+ return head + ''.join(frames) + ''.join(exception) + '''
+
+The above is a description of an error in a Python program. Here is
+the original traceback:
+
+%s
+''' % ''.join(traceback.format_exception(etype, evalue, etb))
+
+class Hook:
+ """A hook to replace sys.excepthook that shows tracebacks in HTML."""
+
+ def __init__(self, display=1, logdir=None, context=5, file=None,
+ format="html"):
+ self.display = display # send tracebacks to browser if true
+ self.logdir = logdir # log tracebacks to files if not None
+ self.context = context # number of source code lines per frame
+ self.file = file or sys.stdout # place to send the output
+ self.format = format
+
+ def __call__(self, etype, evalue, etb):
+ self.handle((etype, evalue, etb))
+
+ def handle(self, info=None):
+ info = info or sys.exc_info()
+ if self.format == "html":
+ self.file.write(reset())
+
+ formatter = (self.format=="html") and html or text
+ plain = False
+ try:
+ doc = formatter(info, self.context)
+ except: # just in case something goes wrong
+ import traceback
+ doc = ''.join(traceback.format_exception(*info))
+ plain = True
+
+ if self.display:
+ if plain:
+ doc = doc.replace('&', '&amp;').replace('<', '&lt;')
+ self.file.write('<pre>' + doc + '</pre>\n')
+ else:
+ self.file.write(doc + '\n')
+ else:
+ self.file.write('<p>A problem occurred in a Python script.\n')
+
+ if self.logdir is not None:
+ import os, tempfile
+ suffix = ['.txt', '.html'][self.format=="html"]
+ (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
+ try:
+ file = os.fdopen(fd, 'w')
+ file.write(doc)
+ file.close()
+ msg = '<p> %s contains the description of this error.' % path
+ except:
+ msg = '<p> Tried to save traceback to %s, but failed.' % path
+ self.file.write(msg + '\n')
+ try:
+ self.file.flush()
+ except: pass
+
+handler = Hook().handle
+def enable(display=1, logdir=None, context=5, format="html"):
+ """Install an exception handler that formats tracebacks as HTML.
+
+ The optional argument 'display' can be set to 0 to suppress sending the
+ traceback to the browser, and 'logdir' can be set to a directory to cause
+ tracebacks to be written to files there."""
+ sys.excepthook = Hook(display=display, logdir=logdir,
+ context=context, format=format)
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/chunk.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,167 @@
+"""Simple class to read IFF chunks.
+
+An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
+Format)) has the following structure:
+
++----------------+
+| ID (4 bytes) |
++----------------+
+| size (4 bytes) |
++----------------+
+| data |
+| ... |
++----------------+
+
+The ID is a 4-byte string which identifies the type of chunk.
+
+The size field (a 32-bit value, encoded using big-endian byte order)
+gives the size of the whole chunk, including the 8-byte header.
+
+Usually an IFF-type file consists of one or more chunks. The proposed
+usage of the Chunk class defined here is to instantiate an instance at
+the start of each chunk and read from the instance until it reaches
+the end, after which a new instance can be instantiated. At the end
+of the file, creating a new instance will fail with an EOFError
+exception.
+
+Usage:
+while True:
+ try:
+ chunk = Chunk(file)
+ except EOFError:
+ break
+ chunktype = chunk.getname()
+ while True:
+ data = chunk.read(nbytes)
+ if not data:
+ break
+ # do something with data
+
+The interface is file-like. The implemented methods are:
+read, close, seek, tell, isatty.
+Extra methods are: skip() (called by close, skips to the end of the chunk),
+getname() (returns the name (ID) of the chunk)
+
+The __init__ method has one required argument, a file-like object
+(including a chunk instance), and one optional argument, a flag which
+specifies whether or not chunks are aligned on 2-byte boundaries. The
+default is 1, i.e. aligned.
+"""
+
+class Chunk:
+ def __init__(self, file, align=True, bigendian=True, inclheader=False):
+ import struct
+ self.closed = False
+ self.align = align # whether to align to word (2-byte) boundaries
+ if bigendian:
+ strflag = '>'
+ else:
+ strflag = '<'
+ self.file = file
+ self.chunkname = file.read(4)
+ if len(self.chunkname) < 4:
+ raise EOFError
+ try:
+ self.chunksize = struct.unpack(strflag+'L', file.read(4))[0]
+ except struct.error:
+ raise EOFError
+ if inclheader:
+ self.chunksize = self.chunksize - 8 # subtract header
+ self.size_read = 0
+ try:
+ self.offset = self.file.tell()
+ except (AttributeError, IOError):
+ self.seekable = False
+ else:
+ self.seekable = True
+
+ def getname(self):
+ """Return the name (ID) of the current chunk."""
+ return self.chunkname
+
+ def getsize(self):
+ """Return the size of the current chunk."""
+ return self.chunksize
+
+ def close(self):
+ if not self.closed:
+ self.skip()
+ self.closed = True
+
+ def isatty(self):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ return False
+
+ def seek(self, pos, whence=0):
+ """Seek to specified position into the chunk.
+ Default position is 0 (start of chunk).
+ If the file is not seekable, this will result in an error.
+ """
+
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if not self.seekable:
+ raise IOError, "cannot seek"
+ if whence == 1:
+ pos = pos + self.size_read
+ elif whence == 2:
+ pos = pos + self.chunksize
+ if pos < 0 or pos > self.chunksize:
+ raise RuntimeError
+ self.file.seek(self.offset + pos, 0)
+ self.size_read = pos
+
+ def tell(self):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ return self.size_read
+
+ def read(self, size=-1):
+ """Read at most size bytes from the chunk.
+ If size is omitted or negative, read until the end
+ of the chunk.
+ """
+
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if self.size_read >= self.chunksize:
+ return ''
+ if size < 0:
+ size = self.chunksize - self.size_read
+ if size > self.chunksize - self.size_read:
+ size = self.chunksize - self.size_read
+ data = self.file.read(size)
+ self.size_read = self.size_read + len(data)
+ if self.size_read == self.chunksize and \
+ self.align and \
+ (self.chunksize & 1):
+ dummy = self.file.read(1)
+ self.size_read = self.size_read + len(dummy)
+ return data
+
+ def skip(self):
+ """Skip the rest of the chunk.
+ If you are not interested in the contents of the chunk,
+ this method should be called so that the file points to
+ the start of the next chunk.
+ """
+
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if self.seekable:
+ try:
+ n = self.chunksize - self.size_read
+ # maybe fix alignment
+ if self.align and (self.chunksize & 1):
+ n = n + 1
+ self.file.seek(n, 1)
+ self.size_read = self.size_read + n
+ return
+ except IOError:
+ pass
+ while self.size_read < self.chunksize:
+ n = min(8192, self.chunksize - self.size_read)
+ dummy = self.read(n)
+ if not dummy:
+ raise EOFError
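+
+# A minimal sketch of reading one chunk from an in-memory IFF-style
+# stream; the 4-byte ID and big-endian size below are hand-built, and
+# with the default inclheader=False the size counts only the data:
+#
+#     from cStringIO import StringIO
+#     f = StringIO('FORM' + '\x00\x00\x00\x04' + 'AIFF')
+#     ch = Chunk(f)
+#     ch.getname()   # 'FORM'
+#     ch.getsize()   # 4
+#     ch.read()      # 'AIFF'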
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/cmd.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,405 @@
+"""A generic class to build line-oriented command interpreters.
+
+Interpreters constructed with this class obey the following conventions:
+
+1. End of file on input is processed as the command 'EOF'.
+2. A command is parsed out of each line by collecting the prefix composed
+ of characters in the identchars member.
+3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
+ is passed a single argument consisting of the remainder of the line.
+4. Typing an empty line repeats the last command. (Actually, it calls the
+ method `emptyline', which may be overridden in a subclass.)
+5. There is a predefined `help' method. Given an argument `topic', it
+ calls the command `help_topic'. With no arguments, it lists all topics
+ with defined help_ functions, broken into up to three topics: documented
+ commands, miscellaneous help topics, and undocumented commands.
+6. The command '?' is a synonym for `help'. The command '!' is a synonym
+ for `shell', if a do_shell method exists.
+7. If completion is enabled, completing commands will be done automatically,
+ and completing of commands args is done by calling complete_foo() with
+ arguments text, line, begidx, endidx. text is the string we are matching
+ against; all returned matches must begin with it. line is the current
+ input line (lstripped), begidx and endidx are the beginning and end
+ indexes of the text being matched, which could be used to provide
+ different completion depending upon which position the argument is in.
+
+The `default' method may be overridden to intercept commands for which there
+is no do_ method.
+
+The `completedefault' method may be overridden to intercept completions for
+commands that have no complete_ method.
+
+The data member `self.ruler' sets the character used to draw separator lines
+in the help messages. If empty, no ruler line is drawn. It defaults to "=".
+
+If the value of `self.intro' is nonempty when the cmdloop method is called,
+it is printed out on interpreter startup. This value may be overridden
+via an optional argument to the cmdloop() method.
+
+The data members `self.doc_header', `self.misc_header', and
+`self.undoc_header' set the headers used for the help function's
+listings of documented functions, miscellaneous topics, and undocumented
+functions respectively.
+
+These interpreters use raw_input; thus, if the readline module is loaded,
+they automatically support Emacs-like command history and editing features.
+"""
+
+import string
+
+__all__ = ["Cmd"]
+
+PROMPT = '(Cmd) '
+IDENTCHARS = string.ascii_letters + string.digits + '_'
+
+class Cmd:
+ """A simple framework for writing line-oriented command interpreters.
+
+ These are often useful for test harnesses, administrative tools, and
+ prototypes that will later be wrapped in a more sophisticated interface.
+
+ A Cmd instance or subclass instance is a line-oriented interpreter
+ framework. There is no good reason to instantiate Cmd itself; rather,
+ it's useful as a superclass of an interpreter class you define yourself
+ in order to inherit Cmd's methods and encapsulate action methods.
+
+ """
+ prompt = PROMPT
+ identchars = IDENTCHARS
+ ruler = '='
+ lastcmd = ''
+ intro = None
+ doc_leader = ""
+ doc_header = "Documented commands (type help <topic>):"
+ misc_header = "Miscellaneous help topics:"
+ undoc_header = "Undocumented commands:"
+ nohelp = "*** No help on %s"
+ use_rawinput = 1
+
+ def __init__(self, completekey='tab', stdin=None, stdout=None):
+ """Instantiate a line-oriented interpreter framework.
+
+ The optional argument 'completekey' is the readline name of a
+ completion key; it defaults to the Tab key. If completekey is
+ not None and the readline module is available, command completion
+ is done automatically. The optional arguments stdin and stdout
+ specify alternate input and output file objects; if not specified,
+ sys.stdin and sys.stdout are used.
+
+ """
+ import sys
+ if stdin is not None:
+ self.stdin = stdin
+ else:
+ self.stdin = sys.stdin
+ if stdout is not None:
+ self.stdout = stdout
+ else:
+ self.stdout = sys.stdout
+ self.cmdqueue = []
+ self.completekey = completekey
+
+ def cmdloop(self, intro=None):
+ """Repeatedly issue a prompt, accept input, parse an initial prefix
+ off the received input, and dispatch to action methods, passing them
+ the remainder of the line as argument.
+
+ """
+
+ self.preloop()
+ if self.use_rawinput and self.completekey:
+ try:
+ import readline
+ self.old_completer = readline.get_completer()
+ readline.set_completer(self.complete)
+ readline.parse_and_bind(self.completekey+": complete")
+ except ImportError:
+ pass
+ try:
+ if intro is not None:
+ self.intro = intro
+ if self.intro:
+ self.stdout.write(str(self.intro)+"\n")
+ stop = None
+ while not stop:
+ if self.cmdqueue:
+ line = self.cmdqueue.pop(0)
+ else:
+ if self.use_rawinput:
+ try:
+ line = raw_input(self.prompt)
+ except EOFError:
+ line = 'EOF'
+ else:
+ self.stdout.write(self.prompt)
+ self.stdout.flush()
+ line = self.stdin.readline()
+ if not len(line):
+ line = 'EOF'
+ else:
+ line = line[:-1] # chop \n
+ line = self.precmd(line)
+ stop = self.onecmd(line)
+ stop = self.postcmd(stop, line)
+ self.postloop()
+ finally:
+ if self.use_rawinput and self.completekey:
+ try:
+ import readline
+ readline.set_completer(self.old_completer)
+ except ImportError:
+ pass
+
+
+ def precmd(self, line):
+ """Hook method executed just before the command line is
+ interpreted, but after the input prompt is generated and issued.
+
+ """
+ return line
+
+ def postcmd(self, stop, line):
+ """Hook method executed just after a command dispatch is finished."""
+ return stop
+
+ def preloop(self):
+ """Hook method executed once when the cmdloop() method is called."""
+ pass
+
+ def postloop(self):
+ """Hook method executed once when the cmdloop() method is about to
+ return.
+
+ """
+ pass
+
+ def parseline(self, line):
+ """Parse the line into a command name and a string containing
+ the arguments. Returns a tuple containing (command, args, line).
+ 'command' and 'args' may be None if the line couldn't be parsed.
+ """
+ line = line.strip()
+ if not line:
+ return None, None, line
+ elif line[0] == '?':
+ line = 'help ' + line[1:]
+ elif line[0] == '!':
+ if hasattr(self, 'do_shell'):
+ line = 'shell ' + line[1:]
+ else:
+ return None, None, line
+ i, n = 0, len(line)
+ while i < n and line[i] in self.identchars: i = i+1
+ cmd, arg = line[:i], line[i:].strip()
+ return cmd, arg, line
+
+ def onecmd(self, line):
+ """Interpret the argument as though it had been typed in response
+ to the prompt.
+
+ This may be overridden, but should not normally need to be;
+ see the precmd() and postcmd() methods for useful execution hooks.
+ The return value is a flag indicating whether interpretation of
+ commands by the interpreter should stop.
+
+ """
+ cmd, arg, line = self.parseline(line)
+ if not line:
+ return self.emptyline()
+ if cmd is None:
+ return self.default(line)
+ self.lastcmd = line
+ if cmd == '':
+ return self.default(line)
+ else:
+ try:
+ func = getattr(self, 'do_' + cmd)
+ except AttributeError:
+ return self.default(line)
+ return func(arg)
+
+ def emptyline(self):
+ """Called when an empty line is entered in response to the prompt.
+
+ If this method is not overridden, it repeats the last nonempty
+ command entered.
+
+ """
+ if self.lastcmd:
+ return self.onecmd(self.lastcmd)
+
+ def default(self, line):
+ """Called on an input line when the command prefix is not recognized.
+
+ If this method is not overridden, it prints an error message and
+ returns.
+
+ """
+ self.stdout.write('*** Unknown syntax: %s\n'%line)
+
+ def completedefault(self, *ignored):
+ """Method called to complete an input line when no command-specific
+ complete_*() method is available.
+
+ By default, it returns an empty list.
+
+ """
+ return []
+
+ def completenames(self, text, *ignored):
+ dotext = 'do_'+text
+ return [a[3:] for a in self.get_names() if a.startswith(dotext)]
+
+ def complete(self, text, state):
+ """Return the next possible completion for 'text'.
+
+ If a command has not been entered, then complete against command list.
+ Otherwise try to call complete_<command> to get list of completions.
+ """
+ if state == 0:
+ import readline
+ origline = readline.get_line_buffer()
+ line = origline.lstrip()
+ stripped = len(origline) - len(line)
+ begidx = readline.get_begidx() - stripped
+ endidx = readline.get_endidx() - stripped
+ if begidx>0:
+ cmd, args, foo = self.parseline(line)
+ if cmd == '':
+ compfunc = self.completedefault
+ else:
+ try:
+ compfunc = getattr(self, 'complete_' + cmd)
+ except AttributeError:
+ compfunc = self.completedefault
+ else:
+ compfunc = self.completenames
+ self.completion_matches = compfunc(text, line, begidx, endidx)
+ try:
+ return self.completion_matches[state]
+ except IndexError:
+ return None
+
+ def get_names(self):
+ # Inheritance says we have to look in class and
+ # base classes; order is not important.
+ names = []
+ classes = [self.__class__]
+ while classes:
+ aclass = classes.pop(0)
+ if aclass.__bases__:
+ classes = classes + list(aclass.__bases__)
+ names = names + dir(aclass)
+ return names
+
+ def complete_help(self, *args):
+ return self.completenames(*args)
+
+ def do_help(self, arg):
+ if arg:
+ # XXX check arg syntax
+ try:
+ func = getattr(self, 'help_' + arg)
+ except AttributeError:
+ try:
+ doc=getattr(self, 'do_' + arg).__doc__
+ if doc:
+ self.stdout.write("%s\n"%str(doc))
+ return
+ except AttributeError:
+ pass
+ self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
+ return
+ func()
+ else:
+ names = self.get_names()
+ cmds_doc = []
+ cmds_undoc = []
+ help = {}
+ for name in names:
+ if name[:5] == 'help_':
+ help[name[5:]]=1
+ names.sort()
+ # There can be duplicates if routines overridden
+ prevname = ''
+ for name in names:
+ if name[:3] == 'do_':
+ if name == prevname:
+ continue
+ prevname = name
+ cmd=name[3:]
+ if cmd in help:
+ cmds_doc.append(cmd)
+ del help[cmd]
+ elif getattr(self, name).__doc__:
+ cmds_doc.append(cmd)
+ else:
+ cmds_undoc.append(cmd)
+ self.stdout.write("%s\n"%str(self.doc_leader))
+ self.print_topics(self.doc_header, cmds_doc, 15,80)
+ self.print_topics(self.misc_header, help.keys(),15,80)
+ self.print_topics(self.undoc_header, cmds_undoc, 15,80)
+
+ def print_topics(self, header, cmds, cmdlen, maxcol):
+ if cmds:
+ self.stdout.write("%s\n"%str(header))
+ if self.ruler:
+ self.stdout.write("%s\n"%str(self.ruler * len(header)))
+ self.columnize(cmds, maxcol-1)
+ self.stdout.write("\n")
+
+ def columnize(self, list, displaywidth=80):
+ """Display a list of strings as a compact set of columns.
+
+ Each column is only as wide as necessary.
+ Columns are separated by two spaces (one was not legible enough).
+ """
+ if not list:
+ self.stdout.write("<empty>\n")
+ return
+ nonstrings = [i for i in range(len(list))
+ if not isinstance(list[i], str)]
+ if nonstrings:
+ raise TypeError, ("list[i] not a string for i in %s" %
+ ", ".join(map(str, nonstrings)))
+ size = len(list)
+ if size == 1:
+ self.stdout.write('%s\n'%str(list[0]))
+ return
+ # Try every row count from 1 upwards
+ for nrows in range(1, len(list)):
+ ncols = (size+nrows-1) // nrows
+ colwidths = []
+ totwidth = -2
+ for col in range(ncols):
+ colwidth = 0
+ for row in range(nrows):
+ i = row + nrows*col
+ if i >= size:
+ break
+ x = list[i]
+ colwidth = max(colwidth, len(x))
+ colwidths.append(colwidth)
+ totwidth += colwidth + 2
+ if totwidth > displaywidth:
+ break
+ if totwidth <= displaywidth:
+ break
+ else:
+ nrows = len(list)
+ ncols = 1
+ colwidths = [0]
+ for row in range(nrows):
+ texts = []
+ for col in range(ncols):
+ i = row + nrows*col
+ if i >= size:
+ x = ""
+ else:
+ x = list[i]
+ texts.append(x)
+ while texts and not texts[-1]:
+ del texts[-1]
+ for col in range(len(texts)):
+ texts[col] = texts[col].ljust(colwidths[col])
+ self.stdout.write("%s\n"%str("  ".join(texts)))
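
The help and completion machinery above keys entirely off method-name prefixes: do_* methods define commands, help_* methods supply help text, and complete_* methods supply argument completions. For illustration only (not part of the patch; the class and command names are made up), a minimal sketch of a Cmd subclass exercising this convention:

    import cmd

    class GreetShell(cmd.Cmd):
        prompt = '(greet) '

        def do_hello(self, arg):
            """hello [name] -- greet someone."""
            print 'Hello,', arg or 'world'

        def do_quit(self, arg):
            """quit -- exit the shell."""
            return True        # a true result stops cmdloop()

    if __name__ == '__main__':
        GreetShell().cmdloop()

Typing help at the prompt routes through do_help() above, which finds hello and quit via their do_ prefixes and prints their docstrings; where readline is available, completing 'he' goes through completenames().
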
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/code.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,307 @@
+"""Utilities needed to emulate Python's interactive interpreter.
+
+"""
+
+# Inspired by similar code by Jeff Epler and Fredrik Lundh.
+
+
+import sys
+import traceback
+from codeop import CommandCompiler, compile_command
+
+__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
+ "compile_command"]
+
+def softspace(file, newvalue):
+ oldvalue = 0
+ try:
+ oldvalue = file.softspace
+ except AttributeError:
+ pass
+ try:
+ file.softspace = newvalue
+ except (AttributeError, TypeError):
+ # "attribute-less object" or "read-only attributes"
+ pass
+ return oldvalue
+
+class InteractiveInterpreter:
+ """Base class for InteractiveConsole.
+
+ This class deals with parsing and interpreter state (the user's
+ namespace); it doesn't deal with input buffering or prompting or
+ input file naming (the filename is always passed in explicitly).
+
+ """
+
+ def __init__(self, locals=None):
+ """Constructor.
+
+ The optional 'locals' argument specifies the dictionary in
+ which code will be executed; it defaults to a newly created
+ dictionary with key "__name__" set to "__console__" and key
+ "__doc__" set to None.
+
+ """
+ if locals is None:
+ locals = {"__name__": "__console__", "__doc__": None}
+ self.locals = locals
+ self.compile = CommandCompiler()
+
+ def runsource(self, source, filename="<input>", symbol="single"):
+ """Compile and run some source in the interpreter.
+
+ Arguments are as for compile_command().
+
+ One of several things can happen:
+
+ 1) The input is incorrect; compile_command() raised an
+ exception (SyntaxError or OverflowError). A syntax traceback
+ will be printed by calling the showsyntaxerror() method.
+
+ 2) The input is incomplete, and more input is required;
+ compile_command() returned None. Nothing happens.
+
+ 3) The input is complete; compile_command() returned a code
+ object. The code is executed by calling self.runcode() (which
+ also handles run-time exceptions, except for SystemExit).
+
+ The return value is True in case 2, False in the other cases (unless
+ an exception is raised). The return value can be used to
+ decide whether to use sys.ps1 or sys.ps2 to prompt the next
+ line.
+
+ """
+ try:
+ code = self.compile(source, filename, symbol)
+ except (OverflowError, SyntaxError, ValueError):
+ # Case 1
+ self.showsyntaxerror(filename)
+ return False
+
+ if code is None:
+ # Case 2
+ return True
+
+ # Case 3
+ self.runcode(code)
+ return False
+
+ def runcode(self, code):
+ """Execute a code object.
+
+ When an exception occurs, self.showtraceback() is called to
+ display a traceback. All exceptions are caught except
+ SystemExit, which is reraised.
+
+ A note about KeyboardInterrupt: this exception may occur
+ elsewhere in this code, and may not always be caught. The
+ caller should be prepared to deal with it.
+
+ """
+ try:
+ exec code in self.locals
+ except SystemExit:
+ raise
+ except:
+ self.showtraceback()
+ else:
+ if softspace(sys.stdout, 0):
+ print
+
+ def showsyntaxerror(self, filename=None):
+ """Display the syntax error that just occurred.
+
+ This doesn't display a stack trace because there isn't one.
+
+ If a filename is given, it is stuffed in the exception instead
+ of what was there before (because Python's parser always uses
+ "<string>" when reading from a string).
+
+ The output is written by self.write(), below.
+
+ """
+ type, value, sys.last_traceback = sys.exc_info()
+ sys.last_type = type
+ sys.last_value = value
+ if filename and type is SyntaxError:
+ # Work hard to stuff the correct filename in the exception
+ try:
+ msg, (dummy_filename, lineno, offset, line) = value
+ except:
+ # Not the format we expect; leave it alone
+ pass
+ else:
+ # Stuff in the right filename
+ value = SyntaxError(msg, (filename, lineno, offset, line))
+ sys.last_value = value
+ list = traceback.format_exception_only(type, value)
+ map(self.write, list)
+
+ def showtraceback(self):
+ """Display the exception that just occurred.
+
+ We remove the first stack item because it is our own code.
+
+ The output is written by self.write(), below.
+
+ """
+ try:
+ type, value, tb = sys.exc_info()
+ sys.last_type = type
+ sys.last_value = value
+ sys.last_traceback = tb
+ tblist = traceback.extract_tb(tb)
+ del tblist[:1]
+ list = traceback.format_list(tblist)
+ if list:
+ list.insert(0, "Traceback (most recent call last):\n")
+ list[len(list):] = traceback.format_exception_only(type, value)
+ finally:
+ tblist = tb = None
+ map(self.write, list)
+
+ def write(self, data):
+ """Write a string.
+
+ The base implementation writes to sys.stderr; a subclass may
+ replace this with a different implementation.
+
+ """
+ sys.stderr.write(data)
+
+
+class InteractiveConsole(InteractiveInterpreter):
+ """Closely emulate the behavior of the interactive Python interpreter.
+
+ This class builds on InteractiveInterpreter and adds prompting
+ using the familiar sys.ps1 and sys.ps2, and input buffering.
+
+ """
+
+ def __init__(self, locals=None, filename="<console>"):
+ """Constructor.
+
+ The optional locals argument will be passed to the
+ InteractiveInterpreter base class.
+
+ The optional filename argument should specify the (file)name
+ of the input stream; it will show up in tracebacks.
+
+ """
+ InteractiveInterpreter.__init__(self, locals)
+ self.filename = filename
+ self.resetbuffer()
+
+ def resetbuffer(self):
+ """Reset the input buffer."""
+ self.buffer = []
+
+ def interact(self, banner=None):
+ """Closely emulate the interactive Python console.
+
+ The optional banner argument specifies the banner to print
+ before the first interaction; by default it prints a banner
+ similar to the one printed by the real Python interpreter,
+ followed by the current class name in parentheses (so as not
+ to confuse this with the real interpreter -- since it's so
+ close!).
+
+ """
+ try:
+ sys.ps1
+ except AttributeError:
+ sys.ps1 = ">>> "
+ try:
+ sys.ps2
+ except AttributeError:
+ sys.ps2 = "... "
+ cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
+ if banner is None:
+ self.write("Python %s on %s\n%s\n(%s)\n" %
+ (sys.version, sys.platform, cprt,
+ self.__class__.__name__))
+ else:
+ self.write("%s\n" % str(banner))
+ more = 0
+ while 1:
+ try:
+ if more:
+ prompt = sys.ps2
+ else:
+ prompt = sys.ps1
+ try:
+ line = self.raw_input(prompt)
+ except EOFError:
+ self.write("\n")
+ break
+ else:
+ more = self.push(line)
+ except KeyboardInterrupt:
+ self.write("\nKeyboardInterrupt\n")
+ self.resetbuffer()
+ more = 0
+
+ def push(self, line):
+ """Push a line to the interpreter.
+
+ The line should not have a trailing newline; it may have
+ internal newlines. The line is appended to a buffer and the
+ interpreter's runsource() method is called with the
+ concatenated contents of the buffer as source. If this
+ indicates that the command was executed or invalid, the buffer
+ is reset; otherwise, the command is incomplete, and the buffer
+ is left as it was after the line was appended. The return
+ value is 1 if more input is required, 0 if the line was dealt
+ with in some way (this is the same as runsource()).
+
+ """
+ self.buffer.append(line)
+ source = "\n".join(self.buffer)
+ more = self.runsource(source, self.filename)
+ if not more:
+ self.resetbuffer()
+ return more
+
+ def raw_input(self, prompt=""):
+ """Write a prompt and read a line.
+
+ The returned line does not include the trailing newline.
+ When the user enters the EOF key sequence, EOFError is raised.
+
+ The base implementation uses the built-in function
+ raw_input(); a subclass may replace this with a different
+ implementation.
+
+ """
+ return raw_input(prompt)
+
+
+def interact(banner=None, readfunc=None, local=None):
+ """Closely emulate the interactive Python interpreter.
+
+ This is a backwards compatible interface to the InteractiveConsole
+ class. When readfunc is not specified, it attempts to import the
+ readline module to enable GNU readline if it is available.
+
+ Arguments (all optional, all default to None):
+
+ banner -- passed to InteractiveConsole.interact()
+ readfunc -- if not None, replaces InteractiveConsole.raw_input()
+ local -- passed to InteractiveInterpreter.__init__()
+
+ """
+ console = InteractiveConsole(local)
+ if readfunc is not None:
+ console.raw_input = readfunc
+ else:
+ try:
+ import readline
+ except ImportError:
+ pass
+ console.interact(banner)
+
+
+if __name__ == '__main__':
+ import pdb
+ pdb.run("interact()\n")
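
For illustration (not part of the patch), a sketch of driving InteractiveConsole programmatically rather than via interact(); the value returned by push() mirrors runsource() as documented above:

    import code

    console = code.InteractiveConsole()
    print console.push('def double(n):')      # True: more input is required
    print console.push('    return n * 2')    # True: the block is still open
    print console.push('')                    # False: buffer compiled and run
    console.push('print double(21)')          # executes at once, prints 42
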
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/codecs.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,1034 @@
+""" codecs -- Python Codec Registry, API and helpers.
+
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+
+import __builtin__, sys
+
+### Registry and builtin stateless codec functions
+
+try:
+ from _codecs import *
+except ImportError, why:
+ raise SystemError('Failed to load the builtin codecs: %s' % why)
+
+__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
+ "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
+ "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
+ "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
+ "strict_errors", "ignore_errors", "replace_errors",
+ "xmlcharrefreplace_errors",
+ "register_error", "lookup_error"]
+
+### Constants
+
+#
+# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
+# and its possible byte string values
+# for UTF8/UTF16/UTF32 output and little/big endian machines
+#
+
+# UTF-8
+BOM_UTF8 = '\xef\xbb\xbf'
+
+# UTF-16, little endian
+BOM_LE = BOM_UTF16_LE = '\xff\xfe'
+
+# UTF-16, big endian
+BOM_BE = BOM_UTF16_BE = '\xfe\xff'
+
+# UTF-32, little endian
+BOM_UTF32_LE = '\xff\xfe\x00\x00'
+
+# UTF-32, big endian
+BOM_UTF32_BE = '\x00\x00\xfe\xff'
+
+if sys.byteorder == 'little':
+
+ # UTF-16, native endianness
+ BOM = BOM_UTF16 = BOM_UTF16_LE
+
+ # UTF-32, native endianness
+ BOM_UTF32 = BOM_UTF32_LE
+
+else:
+
+ # UTF-16, native endianness
+ BOM = BOM_UTF16 = BOM_UTF16_BE
+
+ # UTF-32, native endianness
+ BOM_UTF32 = BOM_UTF32_BE
+
+# Old broken names (don't use in new code)
+BOM32_LE = BOM_UTF16_LE
+BOM32_BE = BOM_UTF16_BE
+BOM64_LE = BOM_UTF32_LE
+BOM64_BE = BOM_UTF32_BE
+
+
+### Codec base classes (defining the API)
+
+class CodecInfo(tuple):
+
+ def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
+ incrementalencoder=None, incrementaldecoder=None, name=None):
+ self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
+ self.name = name
+ self.encode = encode
+ self.decode = decode
+ self.incrementalencoder = incrementalencoder
+ self.incrementaldecoder = incrementaldecoder
+ self.streamwriter = streamwriter
+ self.streamreader = streamreader
+ return self
+
+ def __repr__(self):
+ return "<%s.%s object for encoding %s at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self))
+
+class Codec:
+
+ """ Defines the interface for stateless encoders/decoders.
+
+ The .encode()/.decode() methods may use different error
+ handling schemes by providing the errors argument. These
+ string values are predefined:
+
+ 'strict' - raise a ValueError (or a subclass)
+ 'ignore' - ignore the character and continue with the next
+ 'replace' - replace with a suitable replacement character;
+ Python will use the official U+FFFD REPLACEMENT
+ CHARACTER for the builtin Unicode codecs on
+ decoding and '?' on encoding.
+ 'xmlcharrefreplace' - Replace with the appropriate XML
+ character reference (only for encoding).
+ 'backslashreplace' - Replace with backslashed escape sequences
+ (only for encoding).
+
+ The set of allowed values can be extended via register_error.
+
+ """
+ def encode(self, input, errors='strict'):
+
+ """ Encodes the object input and returns a tuple (output
+ object, length consumed).
+
+ errors defines the error handling to apply. It defaults to
+ 'strict' handling.
+
+ The method may not store state in the Codec instance. Use
+ StreamCodec for codecs which have to keep state in order to
+ make encoding/decoding efficient.
+
+ The encoder must be able to handle zero length input and
+ return an empty object of the output object type in this
+ situation.
+
+ """
+ raise NotImplementedError
+
+ def decode(self, input, errors='strict'):
+
+ """ Decodes the object input and returns a tuple (output
+ object, length consumed).
+
+ input must be an object which provides the bf_getreadbuf
+ buffer slot. Python strings, buffer objects and memory
+ mapped files are examples of objects providing this slot.
+
+ errors defines the error handling to apply. It defaults to
+ 'strict' handling.
+
+ The method may not store state in the Codec instance. Use
+ StreamCodec for codecs which have to keep state in order to
+ make encoding/decoding efficient.
+
+ The decoder must be able to handle zero length input and
+ return an empty object of the output object type in this
+ situation.
+
+ """
+ raise NotImplementedError
+
+class IncrementalEncoder(object):
+ """
+ An IncrementalEncoder encodes an input in multiple steps. The input can be
+ passed piece by piece to the encode() method. The IncrementalEncoder remembers
+ the state of the encoding process between calls to encode().
+ """
+ def __init__(self, errors='strict'):
+ """
+ Creates an IncrementalEncoder instance.
+
+ The IncrementalEncoder may use different error handling schemes by
+ providing the errors keyword argument. See the module docstring
+ for a list of possible values.
+ """
+ self.errors = errors
+ self.buffer = ""
+
+ def encode(self, input, final=False):
+ """
+ Encodes input and returns the resulting object.
+ """
+ raise NotImplementedError
+
+ def reset(self):
+ """
+ Resets the encoder to the initial state.
+ """
+
+class BufferedIncrementalEncoder(IncrementalEncoder):
+ """
+ This subclass of IncrementalEncoder can be used as the baseclass for an
+ incremental encoder if the encoder must keep some of the output in a
+ buffer between calls to encode().
+ """
+ def __init__(self, errors='strict'):
+ IncrementalEncoder.__init__(self, errors)
+ self.buffer = "" # unencoded input that is kept between calls to encode()
+
+ def _buffer_encode(self, input, errors, final):
+ # Override this method in subclasses: It must encode input
+ # and return an (output, length consumed) tuple
+ raise NotImplementedError
+
+ def encode(self, input, final=False):
+ # encode input (taking the buffer into account)
+ data = self.buffer + input
+ (result, consumed) = self._buffer_encode(data, self.errors, final)
+ # keep unencoded input until the next call
+ self.buffer = data[consumed:]
+ return result
+
+ def reset(self):
+ IncrementalEncoder.reset(self)
+ self.buffer = ""
+
+class IncrementalDecoder(object):
+ """
+ An IncrementalDecoder decodes an input in multiple steps. The input can be
+ passed piece by piece to the decode() method. The IncrementalDecoder
+ remembers the state of the decoding process between calls to decode().
+ """
+ def __init__(self, errors='strict'):
+ """
+ Creates an IncrementalDecoder instance.
+
+ The IncrementalDecoder may use different error handling schemes by
+ providing the errors keyword argument. See the module docstring
+ for a list of possible values.
+ """
+ self.errors = errors
+
+ def decode(self, input, final=False):
+ """
+ Decodes input and returns the resulting object.
+ """
+ raise NotImplementedError
+
+ def reset(self):
+ """
+ Resets the decoder to the initial state.
+ """
+
+class BufferedIncrementalDecoder(IncrementalDecoder):
+ """
+ This subclass of IncrementalDecoder can be used as the baseclass for an
+ incremental decoder if the decoder must be able to handle incomplete byte
+ sequences.
+ """
+ def __init__(self, errors='strict'):
+ IncrementalDecoder.__init__(self, errors)
+ self.buffer = "" # undecoded input that is kept between calls to decode()
+
+ def _buffer_decode(self, input, errors, final):
+ # Override this method in subclasses: It must decode input
+ # and return an (output, length consumed) tuple
+ raise NotImplementedError
+
+ def decode(self, input, final=False):
+ # decode input (taking the buffer into account)
+ data = self.buffer + input
+ (result, consumed) = self._buffer_decode(data, self.errors, final)
+ # keep undecoded input until the next call
+ self.buffer = data[consumed:]
+ return result
+
+ def reset(self):
+ IncrementalDecoder.reset(self)
+ self.buffer = ""
+
+#
+# The StreamWriter and StreamReader classes provide generic working
+# interfaces which can be used to implement new encoding submodules
+# very easily. See encodings/utf_8.py for an example on how this is
+# done.
+#
+
+class StreamWriter(Codec):
+
+ def __init__(self, stream, errors='strict'):
+
+ """ Creates a StreamWriter instance.
+
+ stream must be a file-like object open for writing
+ (binary) data.
+
+ The StreamWriter may use different error handling
+ schemes by providing the errors keyword argument. These
+ parameters are predefined:
+
+ 'strict' - raise a ValueError (or a subclass)
+ 'ignore' - ignore the character and continue with the next
+ 'replace' - replace with a suitable replacement character
+ 'xmlcharrefreplace' - Replace with the appropriate XML
+ character reference.
+ 'backslashreplace' - Replace with backslashed escape
+ sequences (only for encoding).
+
+ The set of allowed parameter values can be extended via
+ register_error.
+ """
+ self.stream = stream
+ self.errors = errors
+
+ def write(self, object):
+
+ """ Writes the object's contents encoded to self.stream.
+ """
+ data, consumed = self.encode(object, self.errors)
+ self.stream.write(data)
+
+ def writelines(self, list):
+
+ """ Writes the concatenated list of strings to the stream
+ using .write().
+ """
+ self.write(''.join(list))
+
+ def reset(self):
+
+ """ Flushes and resets the codec buffers used for keeping state.
+
+ Calling this method should ensure that the data on the
+ output is put into a clean state, that allows appending
+ of new fresh data without having to rescan the whole
+ stream to recover state.
+
+ """
+ pass
+
+ def __getattr__(self, name,
+ getattr=getattr):
+
+ """ Inherit all other methods from the underlying stream.
+ """
+ return getattr(self.stream, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self.stream.close()
+
+###
+
+class StreamReader(Codec):
+
+ def __init__(self, stream, errors='strict'):
+
+ """ Creates a StreamReader instance.
+
+ stream must be a file-like object open for reading
+ (binary) data.
+
+ The StreamReader may use different error handling
+ schemes by providing the errors keyword argument. These
+ parameters are predefined:
+
+ 'strict' - raise a ValueError (or a subclass)
+ 'ignore' - ignore the character and continue with the next
+ 'replace' - replace with a suitable replacement character;
+
+ The set of allowed parameter values can be extended via
+ register_error.
+ """
+ self.stream = stream
+ self.errors = errors
+ self.bytebuffer = ""
+ # For str->str decoding this will stay a str
+ # For str->unicode decoding the first read will promote it to unicode
+ self.charbuffer = ""
+ self.linebuffer = None
+
+ def decode(self, input, errors='strict'):
+ raise NotImplementedError
+
+ def read(self, size=-1, chars=-1, firstline=False):
+
+ """ Decodes data from the stream self.stream and returns the
+ resulting object.
+
+ chars indicates the number of characters to read from the
+ stream. read() will never return more than chars
+ characters, but it might return less, if there are not enough
+ characters available.
+
+ size indicates the approximate maximum number of bytes to
+ read from the stream for decoding purposes. The decoder
+ can modify this setting as appropriate. The default value
+ -1 indicates to read and decode as much as possible. size
+ is intended to prevent having to decode huge files in one
+ step.
+
+ If firstline is true, and a UnicodeDecodeError happens
+ after the first line terminator in the input, only the first line
+ will be returned; the rest of the input will be kept until the
+ next call to read().
+
+ The method should use a greedy read strategy meaning that
+ it should read as much data as is allowed within the
+ definition of the encoding and the given size, e.g. if
+ optional encoding endings or state markers are available
+ on the stream, these should be read too.
+ """
+ # If we have lines cached, first merge them back into characters
+ if self.linebuffer:
+ self.charbuffer = "".join(self.linebuffer)
+ self.linebuffer = None
+
+ # read until we get the required number of characters (if available)
+ while True:
+ # can the request be satisfied from the character buffer?
+ if chars < 0:
+ if size < 0:
+ if self.charbuffer:
+ break
+ elif len(self.charbuffer) >= size:
+ break
+ else:
+ if len(self.charbuffer) >= chars:
+ break
+ # we need more data
+ if size < 0:
+ newdata = self.stream.read()
+ else:
+ newdata = self.stream.read(size)
+ # decode bytes (those remaining from the last call included)
+ data = self.bytebuffer + newdata
+ try:
+ newchars, decodedbytes = self.decode(data, self.errors)
+ except UnicodeDecodeError, exc:
+ if firstline:
+ newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
+ lines = newchars.splitlines(True)
+ if len(lines)<=1:
+ raise
+ else:
+ raise
+ # keep undecoded bytes until the next call
+ self.bytebuffer = data[decodedbytes:]
+ # put new characters in the character buffer
+ self.charbuffer += newchars
+ # there was no data available
+ if not newdata:
+ break
+ if chars < 0:
+ # Return everything we've got
+ result = self.charbuffer
+ self.charbuffer = ""
+ else:
+ # Return the first chars characters
+ result = self.charbuffer[:chars]
+ self.charbuffer = self.charbuffer[chars:]
+ return result
+
+ def readline(self, size=None, keepends=True):
+
+ """ Read one line from the input stream and return the
+ decoded data.
+
+ size, if given, is passed as size argument to the
+ read() method.
+
+ """
+ # If we have lines cached from an earlier read, return
+ # them unconditionally
+ if self.linebuffer:
+ line = self.linebuffer[0]
+ del self.linebuffer[0]
+ if len(self.linebuffer) == 1:
+ # revert to charbuffer mode; we might need more data
+ # next time
+ self.charbuffer = self.linebuffer[0]
+ self.linebuffer = None
+ if not keepends:
+ line = line.splitlines(False)[0]
+ return line
+
+ readsize = size or 72
+ line = ""
+ # If size is given, we call read() only once
+ while True:
+ data = self.read(readsize, firstline=True)
+ if data:
+ # If we're at a "\r" read one extra character (which might
+ # be a "\n") to get a proper line ending. If the stream is
+ # temporarily exhausted we return the wrong line ending.
+ if data.endswith("\r"):
+ data += self.read(size=1, chars=1)
+
+ line += data
+ lines = line.splitlines(True)
+ if lines:
+ if len(lines) > 1:
+ # More than one line result; the first line is a full line
+ # to return
+ line = lines[0]
+ del lines[0]
+ if len(lines) > 1:
+ # cache the remaining lines
+ lines[-1] += self.charbuffer
+ self.linebuffer = lines
+ self.charbuffer = None
+ else:
+ # only one remaining line, put it back into charbuffer
+ self.charbuffer = lines[0] + self.charbuffer
+ if not keepends:
+ line = line.splitlines(False)[0]
+ break
+ line0withend = lines[0]
+ line0withoutend = lines[0].splitlines(False)[0]
+ if line0withend != line0withoutend: # We really have a line end
+ # Put the rest back together and keep it until the next call
+ self.charbuffer = "".join(lines[1:]) + self.charbuffer
+ if keepends:
+ line = line0withend
+ else:
+ line = line0withoutend
+ break
+ # we didn't get anything or this was our only try
+ if not data or size is not None:
+ if line and not keepends:
+ line = line.splitlines(False)[0]
+ break
+ if readsize<8000:
+ readsize *= 2
+ return line
+
+ def readlines(self, sizehint=None, keepends=True):
+
+ """ Read all lines available on the input stream
+ and return them as list of lines.
+
+ Line breaks are implemented using the codec's decoder
+ method and are included in the list entries.
+
+ sizehint, if given, is ignored since there is no efficient
+ way to find the true end-of-line.
+
+ """
+ data = self.read()
+ return data.splitlines(keepends)
+
+ def reset(self):
+
+ """ Resets the codec buffers used for keeping state.
+
+ Note that no stream repositioning should take place.
+ This method is primarily intended to be able to recover
+ from decoding errors.
+
+ """
+ self.bytebuffer = ""
+ self.charbuffer = u""
+ self.linebuffer = None
+
+ def seek(self, offset, whence=0):
+ """ Set the input stream's current position.
+
+ Resets the codec buffers used for keeping state.
+ """
+ self.reset()
+ self.stream.seek(offset, whence)
+
+ def next(self):
+
+ """ Return the next decoded line from the input stream."""
+ line = self.readline()
+ if line:
+ return line
+ raise StopIteration
+
+ def __iter__(self):
+ return self
+
+ def __getattr__(self, name,
+ getattr=getattr):
+
+ """ Inherit all other methods from the underlying stream.
+ """
+ return getattr(self.stream, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self.stream.close()
+
+###
+
+class StreamReaderWriter:
+
+ """ StreamReaderWriter instances allow wrapping streams which
+ work in both read and write modes.
+
+ The design is such that one can use the factory functions
+ returned by the codec.lookup() function to construct the
+ instance.
+
+ """
+ # Optional attributes set by the file wrappers below
+ encoding = 'unknown'
+
+ def __init__(self, stream, Reader, Writer, errors='strict'):
+
+ """ Creates a StreamReaderWriter instance.
+
+ stream must be a Stream-like object.
+
+ Reader, Writer must be factory functions or classes
+ providing the StreamReader, StreamWriter interface resp.
+
+ Error handling is done in the same way as defined for the
+ StreamWriter/Readers.
+
+ """
+ self.stream = stream
+ self.reader = Reader(stream, errors)
+ self.writer = Writer(stream, errors)
+ self.errors = errors
+
+ def read(self, size=-1):
+
+ return self.reader.read(size)
+
+ def readline(self, size=None):
+
+ return self.reader.readline(size)
+
+ def readlines(self, sizehint=None):
+
+ return self.reader.readlines(sizehint)
+
+ def next(self):
+
+ """ Return the next decoded line from the input stream."""
+ return self.reader.next()
+
+ def __iter__(self):
+ return self
+
+ def write(self, data):
+
+ return self.writer.write(data)
+
+ def writelines(self, list):
+
+ return self.writer.writelines(list)
+
+ def reset(self):
+
+ self.reader.reset()
+ self.writer.reset()
+
+ def __getattr__(self, name,
+ getattr=getattr):
+
+ """ Inherit all other methods from the underlying stream.
+ """
+ return getattr(self.stream, name)
+
+ # these are needed to make "with codecs.open(...)" work properly
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self.stream.close()
+
+###
+
+class StreamRecoder:
+
+ """ StreamRecoder instances provide a frontend - backend
+ view of encoding data.
+
+ They use the complete set of APIs returned by the
+ codecs.lookup() function to implement their task.
+
+ Data written to the stream is first decoded into an
+ intermediate format (which is dependent on the given codec
+ combination) and then written to the stream using an instance
+ of the provided Writer class.
+
+ In the other direction, data is read from the stream using a
+ Reader instance and then returned to the caller as encoded data.
+
+ """
+ # Optional attributes set by the file wrappers below
+ data_encoding = 'unknown'
+ file_encoding = 'unknown'
+
+ def __init__(self, stream, encode, decode, Reader, Writer,
+ errors='strict'):
+
+ """ Creates a StreamRecoder instance which implements a two-way
+ conversion: encode and decode work on the frontend (the
+ input to .read() and output of .write()) while
+ Reader and Writer work on the backend (reading and
+ writing to the stream).
+
+ You can use these objects to do transparent direct
+ recodings from e.g. latin-1 to utf-8 and back.
+
+ stream must be a file-like object.
+
+ encode, decode must adhere to the Codec interface, Reader,
+ Writer must be factory functions or classes providing the
+ StreamReader, StreamWriter interface resp.
+
+ encode and decode are needed for the frontend translation,
+ Reader and Writer for the backend translation. Unicode is
+ used as intermediate encoding.
+
+ Error handling is done in the same way as defined for the
+ StreamWriter/Readers.
+
+ """
+ self.stream = stream
+ self.encode = encode
+ self.decode = decode
+ self.reader = Reader(stream, errors)
+ self.writer = Writer(stream, errors)
+ self.errors = errors
+
+ def read(self, size=-1):
+
+ data = self.reader.read(size)
+ data, bytesencoded = self.encode(data, self.errors)
+ return data
+
+ def readline(self, size=None):
+
+ if size is None:
+ data = self.reader.readline()
+ else:
+ data = self.reader.readline(size)
+ data, bytesencoded = self.encode(data, self.errors)
+ return data
+
+ def readlines(self, sizehint=None):
+
+ data = self.reader.read()
+ data, bytesencoded = self.encode(data, self.errors)
+ return data.splitlines(1)
+
+ def next(self):
+
+ """ Return the next decoded line from the input stream."""
+ data = self.reader.next()
+ data, bytesencoded = self.encode(data, self.errors)
+ return data
+
+ def __iter__(self):
+ return self
+
+ def write(self, data):
+
+ data, bytesdecoded = self.decode(data, self.errors)
+ return self.writer.write(data)
+
+ def writelines(self, list):
+
+ data = ''.join(list)
+ data, bytesdecoded = self.decode(data, self.errors)
+ return self.writer.write(data)
+
+ def reset(self):
+
+ self.reader.reset()
+ self.writer.reset()
+
+ def __getattr__(self, name,
+ getattr=getattr):
+
+ """ Inherit all other methods from the underlying stream.
+ """
+ return getattr(self.stream, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self.stream.close()
+
+### Shortcuts
+
+def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
+
+ """ Open an encoded file using the given mode and return
+ a wrapped version providing transparent encoding/decoding.
+
+ Note: The wrapped version will only accept the object format
+ defined by the codecs, i.e. Unicode objects for most builtin
+ codecs. Output is also codec dependent and will usually be
+ Unicode as well.
+
+ Files are always opened in binary mode, even if no binary mode
+ was specified. This is done to avoid data loss due to encodings
+ using 8-bit values. The default file mode is 'rb' meaning to
+ open the file in binary read mode.
+
+ encoding specifies the encoding which is to be used for the
+ file.
+
+ errors may be given to define the error handling. It defaults
+ to 'strict' which causes ValueErrors to be raised in case an
+ encoding error occurs.
+
+ buffering has the same meaning as for the builtin open() API.
+ It defaults to line buffered.
+
+ The returned wrapped file object provides an extra attribute
+ .encoding which allows querying the used encoding. This
+ attribute is only available if an encoding was specified as
+ parameter.
+
+ """
+ if encoding is not None and \
+ 'b' not in mode:
+ # Force opening of the file in binary mode
+ mode = mode + 'b'
+ file = __builtin__.open(filename, mode, buffering)
+ if encoding is None:
+ return file
+ info = lookup(encoding)
+ srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
+ # Add attributes to simplify introspection
+ srw.encoding = encoding
+ return srw
+
+def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
+
+ """ Return a wrapped version of file which provides transparent
+ encoding translation.
+
+ Strings written to the wrapped file are interpreted according
+ to the given data_encoding and then written to the original
+ file as string using file_encoding. The intermediate encoding
+ will usually be Unicode but depends on the specified codecs.
+
+ Strings are read from the file using file_encoding and then
+ passed back to the caller as string using data_encoding.
+
+ If file_encoding is not given, it defaults to data_encoding.
+
+ errors may be given to define the error handling. It defaults
+ to 'strict' which causes ValueErrors to be raised in case an
+ encoding error occurs.
+
+ The returned wrapped file object provides two extra attributes
+ .data_encoding and .file_encoding which reflect the given
+ parameters of the same name. The attributes can be used for
+ introspection by Python programs.
+
+ """
+ if file_encoding is None:
+ file_encoding = data_encoding
+ data_info = lookup(data_encoding)
+ file_info = lookup(file_encoding)
+ sr = StreamRecoder(file, data_info.encode, data_info.decode,
+ file_info.streamreader, file_info.streamwriter, errors)
+ # Add attributes to simplify introspection
+ sr.data_encoding = data_encoding
+ sr.file_encoding = file_encoding
+ return sr
+
+### Helpers for codec lookup
+
+def getencoder(encoding):
+
+ """ Look up the codec for the given encoding and return
+ its encoder function.
+
+ Raises a LookupError in case the encoding cannot be found.
+
+ """
+ return lookup(encoding).encode
+
+def getdecoder(encoding):
+
+ """ Look up the codec for the given encoding and return
+ its decoder function.
+
+ Raises a LookupError in case the encoding cannot be found.
+
+ """
+ return lookup(encoding).decode
+
+def getincrementalencoder(encoding):
+
+ """ Look up the codec for the given encoding and return
+ its IncrementalEncoder class or factory function.
+
+ Raises a LookupError in case the encoding cannot be found
+ or the codec doesn't provide an incremental encoder.
+
+ """
+ encoder = lookup(encoding).incrementalencoder
+ if encoder is None:
+ raise LookupError(encoding)
+ return encoder
+
+def getincrementaldecoder(encoding):
+
+ """ Look up the codec for the given encoding and return
+ its IncrementalDecoder class or factory function.
+
+ Raises a LookupError in case the encoding cannot be found
+ or the codec doesn't provide an incremental decoder.
+
+ """
+ decoder = lookup(encoding).incrementaldecoder
+ if decoder is None:
+ raise LookupError(encoding)
+ return decoder
+
+def getreader(encoding):
+
+ """ Look up the codec for the given encoding and return
+ its StreamReader class or factory function.
+
+ Raises a LookupError in case the encoding cannot be found.
+
+ """
+ return lookup(encoding).streamreader
+
+def getwriter(encoding):
+
+ """ Look up the codec for the given encoding and return
+ its StreamWriter class or factory function.
+
+ Raises a LookupError in case the encoding cannot be found.
+
+ """
+ return lookup(encoding).streamwriter
+
+def iterencode(iterator, encoding, errors='strict', **kwargs):
+ """
+ Encoding iterator.
+
+ Encodes the input strings from the iterator using an IncrementalEncoder.
+
+ errors and kwargs are passed through to the IncrementalEncoder
+ constructor.
+ """
+ encoder = getincrementalencoder(encoding)(errors, **kwargs)
+ for input in iterator:
+ output = encoder.encode(input)
+ if output:
+ yield output
+ output = encoder.encode("", True)
+ if output:
+ yield output
+
+def iterdecode(iterator, encoding, errors='strict', **kwargs):
+ """
+ Decoding iterator.
+
+ Decodes the input strings from the iterator using an IncrementalDecoder.
+
+ errors and kwargs are passed through to the IncrementalDecoder
+ constructor.
+ """
+ decoder = getincrementaldecoder(encoding)(errors, **kwargs)
+ for input in iterator:
+ output = decoder.decode(input)
+ if output:
+ yield output
+ output = decoder.decode("", True)
+ if output:
+ yield output
+
+### Helpers for charmap-based codecs
+
+def make_identity_dict(rng):
+
+ """ make_identity_dict(rng) -> dict
+
+ Return a dictionary where elements of the rng sequence are
+ mapped to themselves.
+
+ """
+ res = {}
+ for i in rng:
+ res[i]=i
+ return res
+
+def make_encoding_map(decoding_map):
+
+ """ Creates an encoding map from a decoding map.
+
+ If a target mapping in the decoding map occurs multiple
+ times, then that target is mapped to None (undefined mapping),
+ causing an exception when encountered by the charmap codec
+ during translation.
+
+ One example where this happens is cp875.py which decodes
+ multiple characters to \u001a.
+
+ """
+ m = {}
+ for k,v in decoding_map.items():
+ if not v in m:
+ m[v] = k
+ else:
+ m[v] = None
+ return m
+
+### error handlers
+
+try:
+ strict_errors = lookup_error("strict")
+ ignore_errors = lookup_error("ignore")
+ replace_errors = lookup_error("replace")
+ xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
+ backslashreplace_errors = lookup_error("backslashreplace")
+except LookupError:
+ # In --disable-unicode builds, these error handler are missing
+ strict_errors = None
+ ignore_errors = None
+ replace_errors = None
+ xmlcharrefreplace_errors = None
+ backslashreplace_errors = None
+
+# Tell modulefinder that using codecs probably needs the encodings
+# package
+_false = 0
+if _false:
+ import encodings
+
+### Tests
+
+if __name__ == '__main__':
+
+ # Make stdout translate Latin-1 output into UTF-8 output
+ sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
+
+ # Have stdin translate Latin-1 input into UTF-8 input
+ sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
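
Two common uses of the shortcuts above, sketched for illustration (the file path is an arbitrary example): codecs.open() for transparent Unicode file I/O via StreamReaderWriter, and iterdecode() for byte streams whose chunks may split a multi-byte sequence:

    import codecs

    f = codecs.open('/tmp/demo.txt', 'w', encoding='utf-8')
    f.write(u'caf\xe9\n')          # written to disk as UTF-8 bytes
    f.close()
    f = codecs.open('/tmp/demo.txt', 'r', encoding='utf-8')
    print repr(f.read())           # u'caf\xe9\n'
    f.close()

    # UTF-8 bytes for u'caf\xe9', deliberately split mid-character:
    chunks = ['caf', '\xc3', '\xa9']
    print repr(u''.join(codecs.iterdecode(iter(chunks), 'utf-8')))

The second example works because the incremental decoder buffers the trailing '\xc3' until the next chunk completes the sequence.
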
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/codeop.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,168 @@
+r"""Utilities to compile possibly incomplete Python source code.
+
+This module provides two interfaces, broadly similar to the builtin
+function compile(), which take program text, a filename and a 'mode'
+and:
+
+- Return code object if the command is complete and valid
+- Return None if the command is incomplete
+- Raise SyntaxError, ValueError or OverflowError if the command is a
+ syntax error (OverflowError and ValueError can be produced by
+ malformed literals).
+
+Approach:
+
+First, check if the source consists entirely of blank lines and
+comments; if so, replace it with 'pass', because the built-in
+parser doesn't always do the right thing for these.
+
+Compile three times: as is, with \n, and with \n\n appended. If it
+compiles as is, it's complete. If it compiles with one \n appended,
+we expect more. If it doesn't compile either way, we compare the
+error we get when compiling with \n or \n\n appended. If the errors
+are the same, the code is broken. But if the errors are different, we
+expect more. Not intuitive; not even guaranteed to hold in future
+releases; but this matches the compiler's behavior from Python 1.4
+through 2.2, at least.
+
+Caveat:
+
+It is possible (but not likely) that the parser stops parsing with a
+successful outcome before reaching the end of the source; in this
+case, trailing symbols may be ignored instead of causing an error.
+For example, a backslash followed by two newlines may be followed by
+arbitrary garbage. This will be fixed once the API for the parser is
+better.
+
+The two interfaces are:
+
+compile_command(source, filename, symbol):
+
+ Compiles a single command in the manner described above.
+
+CommandCompiler():
+
+ Instances of this class have __call__ methods identical in
+ signature to compile_command; the difference is that if the
+ instance compiles program text containing a __future__ statement,
+ the instance 'remembers' and compiles all subsequent program texts
+ with the statement in force.
+
+The module also provides another class:
+
+Compile():
+
+ Instances of this class act like the built-in function compile,
+ but with 'memory' in the sense described above.
+"""
+
+import __future__
+
+_features = [getattr(__future__, fname)
+ for fname in __future__.all_feature_names]
+
+__all__ = ["compile_command", "Compile", "CommandCompiler"]
+
+PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
+
+def _maybe_compile(compiler, source, filename, symbol):
+ # Check for source consisting of only blank lines and comments
+ for line in source.split("\n"):
+ line = line.strip()
+ if line and line[0] != '#':
+ break # Leave it alone
+ else:
+ if symbol != "eval":
+ source = "pass" # Replace it with a 'pass' statement
+
+ err = err1 = err2 = None
+ code = code1 = code2 = None
+
+ try:
+ code = compiler(source, filename, symbol)
+ except SyntaxError, err:
+ pass
+
+ try:
+ code1 = compiler(source + "\n", filename, symbol)
+ except SyntaxError, err1:
+ pass
+
+ try:
+ code2 = compiler(source + "\n\n", filename, symbol)
+ except SyntaxError, err2:
+ pass
+
+ if code:
+ return code
+ if not code1 and repr(err1) == repr(err2):
+ raise SyntaxError, err1
+
+def _compile(source, filename, symbol):
+ return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
+
+def compile_command(source, filename="<input>", symbol="single"):
+ r"""Compile a command and determine whether it is incomplete.
+
+ Arguments:
+
+ source -- the source string; may contain \n characters
+ filename -- optional filename from which source was read; default
+ "<input>"
+ symbol -- optional grammar start symbol; "single" (default) or "eval"
+
+ Return value / exceptions raised:
+
+ - Return a code object if the command is complete and valid
+ - Return None if the command is incomplete
+ - Raise SyntaxError, ValueError or OverflowError if the command is a
+ syntax error (OverflowError and ValueError can be produced by
+ malformed literals).
+ """
+ return _maybe_compile(_compile, source, filename, symbol)
+
+class Compile:
+ """Instances of this class behave much like the built-in compile
+ function, but if one is used to compile text containing a future
+ statement, it "remembers" and compiles all subsequent program texts
+ with the statement in force."""
+ def __init__(self):
+ self.flags = PyCF_DONT_IMPLY_DEDENT
+
+ def __call__(self, source, filename, symbol):
+ codeob = compile(source, filename, symbol, self.flags, 1)
+ for feature in _features:
+ if codeob.co_flags & feature.compiler_flag:
+ self.flags |= feature.compiler_flag
+ return codeob
+
+class CommandCompiler:
+ """Instances of this class have __call__ methods identical in
+ signature to compile_command; the difference is that if the
+ instance compiles program text containing a __future__ statement,
+ the instance 'remembers' and compiles all subsequent program texts
+ with the statement in force."""
+
+ def __init__(self,):
+ self.compiler = Compile()
+
+ def __call__(self, source, filename="<input>", symbol="single"):
+ r"""Compile a command and determine whether it is incomplete.
+
+ Arguments:
+
+ source -- the source string; may contain \n characters
+ filename -- optional filename from which source was read;
+ default "<input>"
+ symbol -- optional grammar start symbol; "single" (default) or
+ "eval"
+
+ Return value / exceptions raised:
+
+ - Return a code object if the command is complete and valid
+ - Return None if the command is incomplete
+ - Raise SyntaxError, ValueError or OverflowError if the command is a
+ syntax error (OverflowError and ValueError can be produced by
+ malformed literals).
+ """
+ return _maybe_compile(self.compiler, source, filename, symbol)
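
The three outcomes described in the module docstring, sketched with compile_command() for illustration:

    from codeop import compile_command

    print compile_command('x = 1')       # a code object: complete and valid
    print compile_command('if True:')    # None: incomplete, keep reading
    try:
        compile_command('f(,)')
    except SyntaxError, err:
        print 'broken:', err             # same error with \n and \n\n appended
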
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/colorsys.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,126 @@
+"""Conversion functions between RGB and other color systems.
+
+This module provides two functions for each color system ABC:
+
+ rgb_to_abc(r, g, b) --> a, b, c
+ abc_to_rgb(a, b, c) --> r, g, b
+
+All inputs and outputs are triples of floats in the range [0.0...1.0]
+(with the exception of I and Q, which cover a slightly larger range).
+Inputs outside the valid range may cause exceptions or invalid outputs.
+
+Supported color systems:
+RGB: Red, Green, Blue components
+YIQ: Luminance, Chrominance (used by composite video signals)
+HLS: Hue, Luminance, Saturation
+HSV: Hue, Saturation, Value
+"""
+
+# References:
+# http://en.wikipedia.org/wiki/YIQ
+# http://en.wikipedia.org/wiki/HLS_color_space
+# http://en.wikipedia.org/wiki/HSV_color_space
+
+__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
+ "rgb_to_hsv","hsv_to_rgb"]
+
+# Some floating point constants
+
+ONE_THIRD = 1.0/3.0
+ONE_SIXTH = 1.0/6.0
+TWO_THIRD = 2.0/3.0
+
+# YIQ: used by composite video signals (linear combinations of RGB)
+# Y: perceived grey level (0.0 == black, 1.0 == white)
+# I, Q: color components
+
+def rgb_to_yiq(r, g, b):
+ y = 0.30*r + 0.59*g + 0.11*b
+ i = 0.60*r - 0.28*g - 0.32*b
+ q = 0.21*r - 0.52*g + 0.31*b
+ return (y, i, q)
+
+def yiq_to_rgb(y, i, q):
+ r = y + 0.948262*i + 0.624013*q
+ g = y - 0.276066*i - 0.639810*q
+ b = y - 1.105450*i + 1.729860*q
+ if r < 0.0: r = 0.0
+ if g < 0.0: g = 0.0
+ if b < 0.0: b = 0.0
+ if r > 1.0: r = 1.0
+ if g > 1.0: g = 1.0
+ if b > 1.0: b = 1.0
+ return (r, g, b)
+
+
+# HLS: Hue, Luminance, Saturation
+# H: position in the spectrum
+# L: color lightness
+# S: color saturation
+
+def rgb_to_hls(r, g, b):
+ maxc = max(r, g, b)
+ minc = min(r, g, b)
+ # XXX Can optimize (maxc+minc) and (maxc-minc)
+ l = (minc+maxc)/2.0
+ if minc == maxc: return 0.0, l, 0.0
+ if l <= 0.5: s = (maxc-minc) / (maxc+minc)
+ else: s = (maxc-minc) / (2.0-maxc-minc)
+ rc = (maxc-r) / (maxc-minc)
+ gc = (maxc-g) / (maxc-minc)
+ bc = (maxc-b) / (maxc-minc)
+ if r == maxc: h = bc-gc
+ elif g == maxc: h = 2.0+rc-bc
+ else: h = 4.0+gc-rc
+ h = (h/6.0) % 1.0
+ return h, l, s
+
+def hls_to_rgb(h, l, s):
+ if s == 0.0: return l, l, l
+ if l <= 0.5: m2 = l * (1.0+s)
+ else: m2 = l+s-(l*s)
+ m1 = 2.0*l - m2
+ return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
+
+def _v(m1, m2, hue):
+ hue = hue % 1.0
+ if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0
+ if hue < 0.5: return m2
+ if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
+ return m1
+
+
+# HSV: Hue, Saturation, Value
+# H: position in the spectrum
+# S: color saturation ("purity")
+# V: color brightness
+
+def rgb_to_hsv(r, g, b):
+ maxc = max(r, g, b)
+ minc = min(r, g, b)
+ v = maxc
+ if minc == maxc: return 0.0, 0.0, v
+ s = (maxc-minc) / maxc
+ rc = (maxc-r) / (maxc-minc)
+ gc = (maxc-g) / (maxc-minc)
+ bc = (maxc-b) / (maxc-minc)
+ if r == maxc: h = bc-gc
+ elif g == maxc: h = 2.0+rc-bc
+ else: h = 4.0+gc-rc
+ h = (h/6.0) % 1.0
+ return h, s, v
+
+def hsv_to_rgb(h, s, v):
+ if s == 0.0: return v, v, v
+ i = int(h*6.0) # XXX assume int() truncates!
+ f = (h*6.0) - i
+ p = v*(1.0 - s)
+ q = v*(1.0 - s*f)
+ t = v*(1.0 - s*(1.0-f))
+ if i%6 == 0: return v, t, p
+ if i == 1: return q, v, p
+ if i == 2: return p, v, t
+ if i == 3: return p, q, v
+ if i == 4: return t, p, v
+ if i == 5: return v, p, q
+ # Cannot get here
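
A quick round-trip through the HSV functions, for illustration:

    import colorsys

    h, s, v = colorsys.rgb_to_hsv(0.2, 0.4, 0.4)
    print h, s, v                        # 0.5 0.5 0.4
    print colorsys.hsv_to_rgb(h, s, v)   # (0.2, 0.4, 0.4)
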
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/commands.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,84 @@
+"""Execute shell commands via os.popen() and return status, output.
+
+Interface summary:
+
+ import commands
+
+ outtext = commands.getoutput(cmd)
+ (exitstatus, outtext) = commands.getstatusoutput(cmd)
+ outtext = commands.getstatus(file) # returns output of "ls -ld file"
+
+A trailing newline is removed from the output string.
+
+Encapsulates the basic operation:
+
+ pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
+ text = pipe.read()
+ sts = pipe.close()
+
+ [Note: it would be nice to add functions to interpret the exit status.]
+"""
+
+__all__ = ["getstatusoutput","getoutput","getstatus"]
+
+# Module 'commands'
+#
+# Various tools for executing commands and looking at their output and status.
+#
+# NB This only works (and is only relevant) for UNIX.
+
+
+# Get 'ls -l' status for an object into a string
+#
+def getstatus(file):
+ """Return output of "ls -ld <file>" in a string."""
+ return getoutput('ls -ld' + mkarg(file))
+
+
+# Get the output from a shell command into a string.
+# The exit status is ignored; a trailing newline is stripped.
+# Assume the command will work with '{ ... ; } 2>&1' around it.
+#
+def getoutput(cmd):
+ """Return output (stdout or stderr) of executing cmd in a shell."""
+ return getstatusoutput(cmd)[1]
+
+
+# Ditto but preserving the exit status.
+# Returns a pair (sts, output)
+#
+def getstatusoutput(cmd):
+ """Return (status, output) of executing cmd in a shell."""
+ import os
+ pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
+ text = pipe.read()
+ sts = pipe.close()
+ if sts is None: sts = 0
+ if text[-1:] == '\n': text = text[:-1]
+ return sts, text
+
+
+# Make command argument from directory and pathname (prefix space, add quotes).
+#
+def mk2arg(head, x):
+ import os
+ return mkarg(os.path.join(head, x))
+
+
+# Make a shell command argument from a string.
+# Return a string beginning with a space followed by a shell-quoted
+# version of the argument.
+# Two strategies: enclose in single quotes if it contains none;
+# otherwise, enclose in double quotes and prefix quotable characters
+# with backslash.
+#
+def mkarg(x):
+ if '\'' not in x:
+ return ' \'' + x + '\''
+ s = ' "'
+ for c in x:
+ if c in '\\$"`':
+ s = s + '\\'
+ s = s + c
+ s = s + '"'
+ return s
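
A sketch of the usual entry point, for illustration; as the module notes, this is UNIX-only since it shells out through os.popen():

    import commands

    status, output = commands.getstatusoutput('ls /bin/ls')
    print status    # 0 on success
    print output    # /bin/ls
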
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/compileall.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,157 @@
+"""Module/script to "compile" all .py files to .pyc (or .pyo) files.
+
+When called as a script with arguments, this compiles the directories
+given as arguments recursively; the -l option prevents it from
+recursing into directories.
+
+Without arguments, it compiles all modules on sys.path, without
+recursing into subdirectories. (Even though it should do so for
+packages -- for now, you'll have to deal with packages separately.)
+
+See module py_compile for details of the actual byte-compilation.
+
+"""
+
+import os
+import sys
+import py_compile
+
+__all__ = ["compile_dir","compile_path"]
+
+def compile_dir(dir, maxlevels=10, ddir=None,
+ force=0, rx=None, quiet=0):
+ """Byte-compile all modules in the given directory tree.
+
+ Arguments (only dir is required):
+
+ dir: the directory to byte-compile
+ maxlevels: maximum recursion level (default 10)
+ ddir: if given, purported directory name (this is the
+ directory name that will show up in error messages)
+ force: if 1, force compilation, even if timestamps are up-to-date
+ quiet: if 1, be quiet during compilation
+
+ """
+ if not quiet:
+ print 'Listing', dir, '...'
+ try:
+ names = os.listdir(dir)
+ except os.error:
+ print "Can't list", dir
+ names = []
+ names.sort()
+ success = 1
+ for name in names:
+ fullname = os.path.join(dir, name)
+ if ddir is not None:
+ dfile = os.path.join(ddir, name)
+ else:
+ dfile = None
+ if rx is not None:
+ mo = rx.search(fullname)
+ if mo:
+ continue
+ if os.path.isfile(fullname):
+ head, tail = name[:-3], name[-3:]
+ if tail == '.py':
+ cfile = fullname + (__debug__ and 'c' or 'o')
+ ftime = os.stat(fullname).st_mtime
+ try: ctime = os.stat(cfile).st_mtime
+ except os.error: ctime = 0
+ if (ctime > ftime) and not force: continue
+ if not quiet:
+ print 'Compiling', fullname, '...'
+ try:
+ ok = py_compile.compile(fullname, None, dfile, True)
+ except KeyboardInterrupt:
+ raise KeyboardInterrupt
+ except py_compile.PyCompileError,err:
+ if quiet:
+ print 'Compiling', fullname, '...'
+ print err.msg
+ success = 0
+ except IOError, e:
+ print "Sorry", e
+ success = 0
+ else:
+ if ok == 0:
+ success = 0
+ elif maxlevels > 0 and \
+ name != os.curdir and name != os.pardir and \
+ os.path.isdir(fullname) and \
+ not os.path.islink(fullname):
+ if not compile_dir(fullname, maxlevels - 1, dfile, force, rx, quiet):
+ success = 0
+ return success
+
+def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
+ """Byte-compile all module on sys.path.
+
+ Arguments (all optional):
+
+ skip_curdir: if true, skip current directory (default true)
+ maxlevels: max recursion level (default 0)
+ force: as for compile_dir() (default 0)
+ quiet: as for compile_dir() (default 0)
+
+ """
+ success = 1
+ for dir in sys.path:
+ if (not dir or dir == os.curdir) and skip_curdir:
+ print 'Skipping current directory'
+ else:
+ success = success and compile_dir(dir, maxlevels, None,
+ force, quiet=quiet)
+ return success
+
+def main():
+ """Script main program."""
+ import getopt
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:')
+ except getopt.error, msg:
+ print msg
+ print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
+ "[-x regexp] [directory ...]"
+ print "-l: don't recurse down"
+ print "-f: force rebuild even if timestamps are up-to-date"
+ print "-q: quiet operation"
+ print "-d destdir: purported directory name for error messages"
+ print " if no directory arguments, -l sys.path is assumed"
+ print "-x regexp: skip files matching the regular expression regexp"
+ print " the regexp is searched for in the full path of the file"
+ sys.exit(2)
+ maxlevels = 10
+ ddir = None
+ force = 0
+ quiet = 0
+ rx = None
+ for o, a in opts:
+ if o == '-l': maxlevels = 0
+ if o == '-d': ddir = a
+ if o == '-f': force = 1
+ if o == '-q': quiet = 1
+ if o == '-x':
+ import re
+ rx = re.compile(a)
+ if ddir:
+ if len(args) != 1:
+ print "-d destdir requires exactly one directory argument"
+ sys.exit(2)
+ success = 1
+ try:
+ if args:
+ for dir in args:
+ if not compile_dir(dir, maxlevels, ddir,
+ force, rx, quiet):
+ success = 0
+ else:
+ success = compile_path()
+ except KeyboardInterrupt:
+ print "\n[interrupt]"
+ success = 0
+ return success
+
+if __name__ == '__main__':
+ exit_status = int(not main())
+ sys.exit(exit_status)
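
Programmatic use of compile_dir(), sketched for illustration (the directory and the exclusion pattern are made-up examples):

    import compileall
    import re

    # Byte-compile a tree, skipping anything under .svn directories.
    ok = compileall.compile_dir('Lib', maxlevels=10,
                                rx=re.compile(r'[/\\][.]svn'), quiet=1)
    print 'all compiled:', bool(ok)
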
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/compiler/__init__.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,26 @@
+"""Package for parsing and compiling Python source code
+
+There are several functions defined at the top level that are imported
+from modules contained in the package.
+
+parse(buf, mode="exec") -> AST
+ Converts a string containing Python source code to an abstract
+ syntax tree (AST). The AST is defined in compiler.ast.
+
+parseFile(path) -> AST
+ The same as parse(open(path))
+
+walk(ast, visitor, verbose=None)
+ Does a pre-order walk over the ast using the visitor instance.
+ See compiler.visitor for details.
+
+compile(source, filename, mode, flags=None, dont_inherit=None)
+ Returns a code object. A replacement for the builtin compile() function.
+
+compileFile(filename)
+ Generates a .pyc file by compiling filename.
+"""
+
+from compiler.transformer import parse, parseFile
+from compiler.visitor import walk
+from compiler.pycodegen import compile, compileFile
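
For illustration (not part of the patch), parse() and walk() used together; the visitor relies on the visitNODENAME dispatch convention implemented in compiler.visitor:

    import compiler

    class NameCollector:
        def __init__(self):
            self.names = []
        def visitName(self, node):
            self.names.append(node.name)

    tree = compiler.parse('y = x + z')
    collector = NameCollector()
    compiler.walk(tree, collector)
    print collector.names    # ['x', 'z'] -- y is an AssName, not a Name
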
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/compiler/ast.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,1356 @@
+"""Python abstract syntax node definitions
+
+This file is automatically generated by Tools/compiler/astgen.py
+"""
+from compiler.consts import CO_VARARGS, CO_VARKEYWORDS
+
+def flatten(seq):
+ l = []
+ for elt in seq:
+ t = type(elt)
+ if t is tuple or t is list:
+ for elt2 in flatten(elt):
+ l.append(elt2)
+ else:
+ l.append(elt)
+ return l
+
+def flatten_nodes(seq):
+ return [n for n in flatten(seq) if isinstance(n, Node)]
+
+nodes = {}
+
+class Node:
+ """Abstract base class for ast nodes."""
+ def getChildren(self):
+ pass # implemented by subclasses
+ def __iter__(self):
+ for n in self.getChildren():
+ yield n
+ def asList(self): # for backwards compatibility
+ return self.getChildren()
+ def getChildNodes(self):
+ pass # implemented by subclasses
+
+class EmptyNode(Node):
+ pass
+
+class Expression(Node):
+ # Expression is an artificial node class to support "eval"
+ nodes["expression"] = "Expression"
+ def __init__(self, node):
+ self.node = node
+
+ def getChildren(self):
+ return self.node,
+
+ def getChildNodes(self):
+ return self.node,
+
+ def __repr__(self):
+ return "Expression(%s)" % (repr(self.node))
+
+class Add(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "Add((%s, %s))" % (repr(self.left), repr(self.right))
+
+class And(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "And(%s)" % (repr(self.nodes),)
+
+class AssAttr(Node):
+ def __init__(self, expr, attrname, flags, lineno=None):
+ self.expr = expr
+ self.attrname = attrname
+ self.flags = flags
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.expr, self.attrname, self.flags
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "AssAttr(%s, %s, %s)" % (repr(self.expr), repr(self.attrname), repr(self.flags))
+
+class AssList(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "AssList(%s)" % (repr(self.nodes),)
+
+class AssName(Node):
+ def __init__(self, name, flags, lineno=None):
+ self.name = name
+ self.flags = flags
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.name, self.flags
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "AssName(%s, %s)" % (repr(self.name), repr(self.flags))
+
+class AssTuple(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "AssTuple(%s)" % (repr(self.nodes),)
+
+class Assert(Node):
+ def __init__(self, test, fail, lineno=None):
+ self.test = test
+ self.fail = fail
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.test)
+ children.append(self.fail)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.test)
+ if self.fail is not None:
+ nodelist.append(self.fail)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Assert(%s, %s)" % (repr(self.test), repr(self.fail))
+
+class Assign(Node):
+ def __init__(self, nodes, expr, lineno=None):
+ self.nodes = nodes
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.extend(flatten(self.nodes))
+ children.append(self.expr)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ nodelist.append(self.expr)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Assign(%s, %s)" % (repr(self.nodes), repr(self.expr))
+
+class AugAssign(Node):
+ def __init__(self, node, op, expr, lineno=None):
+ self.node = node
+ self.op = op
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.node, self.op, self.expr
+
+ def getChildNodes(self):
+ return self.node, self.expr
+
+ def __repr__(self):
+ return "AugAssign(%s, %s, %s)" % (repr(self.node), repr(self.op), repr(self.expr))
+
+class Backquote(Node):
+ def __init__(self, expr, lineno=None):
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.expr,
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "Backquote(%s)" % (repr(self.expr),)
+
+class Bitand(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Bitand(%s)" % (repr(self.nodes),)
+
+class Bitor(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Bitor(%s)" % (repr(self.nodes),)
+
+class Bitxor(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Bitxor(%s)" % (repr(self.nodes),)
+
+class Break(Node):
+ def __init__(self, lineno=None):
+ self.lineno = lineno
+
+ def getChildren(self):
+ return ()
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "Break()"
+
+class CallFunc(Node):
+ def __init__(self, node, args, star_args = None, dstar_args = None, lineno=None):
+ self.node = node
+ self.args = args
+ self.star_args = star_args
+ self.dstar_args = dstar_args
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.node)
+ children.extend(flatten(self.args))
+ children.append(self.star_args)
+ children.append(self.dstar_args)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.node)
+ nodelist.extend(flatten_nodes(self.args))
+ if self.star_args is not None:
+ nodelist.append(self.star_args)
+ if self.dstar_args is not None:
+ nodelist.append(self.dstar_args)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "CallFunc(%s, %s, %s, %s)" % (repr(self.node), repr(self.args), repr(self.star_args), repr(self.dstar_args))
+
+class Class(Node):
+ def __init__(self, name, bases, doc, code, lineno=None):
+ self.name = name
+ self.bases = bases
+ self.doc = doc
+ self.code = code
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.name)
+ children.extend(flatten(self.bases))
+ children.append(self.doc)
+ children.append(self.code)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.bases))
+ nodelist.append(self.code)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Class(%s, %s, %s, %s)" % (repr(self.name), repr(self.bases), repr(self.doc), repr(self.code))
+
+class Compare(Node):
+ def __init__(self, expr, ops, lineno=None):
+ self.expr = expr
+ self.ops = ops
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.expr)
+ children.extend(flatten(self.ops))
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.expr)
+ nodelist.extend(flatten_nodes(self.ops))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Compare(%s, %s)" % (repr(self.expr), repr(self.ops))
+
+class Const(Node):
+ def __init__(self, value, lineno=None):
+ self.value = value
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.value,
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "Const(%s)" % (repr(self.value),)
+
+class Continue(Node):
+ def __init__(self, lineno=None):
+ self.lineno = lineno
+
+ def getChildren(self):
+ return ()
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "Continue()"
+
+class Decorators(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Decorators(%s)" % (repr(self.nodes),)
+
+class Dict(Node):
+ def __init__(self, items, lineno=None):
+ self.items = items
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.items))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.items))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Dict(%s)" % (repr(self.items),)
+
+class Discard(Node):
+ def __init__(self, expr, lineno=None):
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.expr,
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "Discard(%s)" % (repr(self.expr),)
+
+class Div(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "Div((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Ellipsis(Node):
+ def __init__(self, lineno=None):
+ self.lineno = lineno
+
+ def getChildren(self):
+ return ()
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "Ellipsis()"
+
+class Exec(Node):
+ def __init__(self, expr, locals, globals, lineno=None):
+ self.expr = expr
+ self.locals = locals
+ self.globals = globals
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.expr)
+ children.append(self.locals)
+ children.append(self.globals)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.expr)
+ if self.locals is not None:
+ nodelist.append(self.locals)
+ if self.globals is not None:
+ nodelist.append(self.globals)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Exec(%s, %s, %s)" % (repr(self.expr), repr(self.locals), repr(self.globals))
+
+class FloorDiv(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "FloorDiv((%s, %s))" % (repr(self.left), repr(self.right))
+
+class For(Node):
+ def __init__(self, assign, list, body, else_, lineno=None):
+ self.assign = assign
+ self.list = list
+ self.body = body
+ self.else_ = else_
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.assign)
+ children.append(self.list)
+ children.append(self.body)
+ children.append(self.else_)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.assign)
+ nodelist.append(self.list)
+ nodelist.append(self.body)
+ if self.else_ is not None:
+ nodelist.append(self.else_)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "For(%s, %s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.body), repr(self.else_))
+
+class From(Node):
+ def __init__(self, modname, names, level, lineno=None):
+ self.modname = modname
+ self.names = names
+ self.level = level
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.modname, self.names, self.level
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "From(%s, %s, %s)" % (repr(self.modname), repr(self.names), repr(self.level))
+
+class Function(Node):
+ def __init__(self, decorators, name, argnames, defaults, flags, doc, code, lineno=None):
+ self.decorators = decorators
+ self.name = name
+ self.argnames = argnames
+ self.defaults = defaults
+ self.flags = flags
+ self.doc = doc
+ self.code = code
+ self.lineno = lineno
+ self.varargs = self.kwargs = None
+ if flags & CO_VARARGS:
+ self.varargs = 1
+ if flags & CO_VARKEYWORDS:
+ self.kwargs = 1
+
+ def getChildren(self):
+ children = []
+ children.append(self.decorators)
+ children.append(self.name)
+ children.append(self.argnames)
+ children.extend(flatten(self.defaults))
+ children.append(self.flags)
+ children.append(self.doc)
+ children.append(self.code)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ if self.decorators is not None:
+ nodelist.append(self.decorators)
+ nodelist.extend(flatten_nodes(self.defaults))
+ nodelist.append(self.code)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Function(%s, %s, %s, %s, %s, %s, %s)" % (repr(self.decorators), repr(self.name), repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.doc), repr(self.code))
+
+class GenExpr(Node):
+ def __init__(self, code, lineno=None):
+ self.code = code
+ self.lineno = lineno
+ self.argnames = ['.0']
+ self.varargs = self.kwargs = None
+
+ def getChildren(self):
+ return self.code,
+
+ def getChildNodes(self):
+ return self.code,
+
+ def __repr__(self):
+ return "GenExpr(%s)" % (repr(self.code),)
+
+class GenExprFor(Node):
+ def __init__(self, assign, iter, ifs, lineno=None):
+ self.assign = assign
+ self.iter = iter
+ self.ifs = ifs
+ self.lineno = lineno
+ self.is_outmost = False
+
+ def getChildren(self):
+ children = []
+ children.append(self.assign)
+ children.append(self.iter)
+ children.extend(flatten(self.ifs))
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.assign)
+ nodelist.append(self.iter)
+ nodelist.extend(flatten_nodes(self.ifs))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "GenExprFor(%s, %s, %s)" % (repr(self.assign), repr(self.iter), repr(self.ifs))
+
+class GenExprIf(Node):
+ def __init__(self, test, lineno=None):
+ self.test = test
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.test,
+
+ def getChildNodes(self):
+ return self.test,
+
+ def __repr__(self):
+ return "GenExprIf(%s)" % (repr(self.test),)
+
+class GenExprInner(Node):
+ def __init__(self, expr, quals, lineno=None):
+ self.expr = expr
+ self.quals = quals
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.expr)
+ children.extend(flatten(self.quals))
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.expr)
+ nodelist.extend(flatten_nodes(self.quals))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "GenExprInner(%s, %s)" % (repr(self.expr), repr(self.quals))
+
+class Getattr(Node):
+ def __init__(self, expr, attrname, lineno=None):
+ self.expr = expr
+ self.attrname = attrname
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.expr, self.attrname
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "Getattr(%s, %s)" % (repr(self.expr), repr(self.attrname))
+
+class Global(Node):
+ def __init__(self, names, lineno=None):
+ self.names = names
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.names,
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "Global(%s)" % (repr(self.names),)
+
+class If(Node):
+ def __init__(self, tests, else_, lineno=None):
+ self.tests = tests
+ self.else_ = else_
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.extend(flatten(self.tests))
+ children.append(self.else_)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.tests))
+ if self.else_ is not None:
+ nodelist.append(self.else_)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "If(%s, %s)" % (repr(self.tests), repr(self.else_))
+
+class IfExp(Node):
+ def __init__(self, test, then, else_, lineno=None):
+ self.test = test
+ self.then = then
+ self.else_ = else_
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.test, self.then, self.else_
+
+ def getChildNodes(self):
+ return self.test, self.then, self.else_
+
+ def __repr__(self):
+ return "IfExp(%s, %s, %s)" % (repr(self.test), repr(self.then), repr(self.else_))
+
+class Import(Node):
+ def __init__(self, names, lineno=None):
+ self.names = names
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.names,
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "Import(%s)" % (repr(self.names),)
+
+class Invert(Node):
+ def __init__(self, expr, lineno=None):
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.expr,
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "Invert(%s)" % (repr(self.expr),)
+
+class Keyword(Node):
+ def __init__(self, name, expr, lineno=None):
+ self.name = name
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.name, self.expr
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "Keyword(%s, %s)" % (repr(self.name), repr(self.expr))
+
+class Lambda(Node):
+ def __init__(self, argnames, defaults, flags, code, lineno=None):
+ self.argnames = argnames
+ self.defaults = defaults
+ self.flags = flags
+ self.code = code
+ self.lineno = lineno
+ self.varargs = self.kwargs = None
+ if flags & CO_VARARGS:
+ self.varargs = 1
+ if flags & CO_VARKEYWORDS:
+ self.kwargs = 1
+
+ def getChildren(self):
+ children = []
+ children.append(self.argnames)
+ children.extend(flatten(self.defaults))
+ children.append(self.flags)
+ children.append(self.code)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.defaults))
+ nodelist.append(self.code)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Lambda(%s, %s, %s, %s)" % (repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.code))
+
+class LeftShift(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "LeftShift((%s, %s))" % (repr(self.left), repr(self.right))
+
+class List(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "List(%s)" % (repr(self.nodes),)
+
+class ListComp(Node):
+ def __init__(self, expr, quals, lineno=None):
+ self.expr = expr
+ self.quals = quals
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.expr)
+ children.extend(flatten(self.quals))
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.expr)
+ nodelist.extend(flatten_nodes(self.quals))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "ListComp(%s, %s)" % (repr(self.expr), repr(self.quals))
+
+class ListCompFor(Node):
+ def __init__(self, assign, list, ifs, lineno=None):
+ self.assign = assign
+ self.list = list
+ self.ifs = ifs
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.assign)
+ children.append(self.list)
+ children.extend(flatten(self.ifs))
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.assign)
+ nodelist.append(self.list)
+ nodelist.extend(flatten_nodes(self.ifs))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "ListCompFor(%s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.ifs))
+
+class ListCompIf(Node):
+ def __init__(self, test, lineno=None):
+ self.test = test
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.test,
+
+ def getChildNodes(self):
+ return self.test,
+
+ def __repr__(self):
+ return "ListCompIf(%s)" % (repr(self.test),)
+
+class Mod(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "Mod((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Module(Node):
+ def __init__(self, doc, node, lineno=None):
+ self.doc = doc
+ self.node = node
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.doc, self.node
+
+ def getChildNodes(self):
+ return self.node,
+
+ def __repr__(self):
+ return "Module(%s, %s)" % (repr(self.doc), repr(self.node))
+
+class Mul(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "Mul((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Name(Node):
+ def __init__(self, name, lineno=None):
+ self.name = name
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.name,
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "Name(%s)" % (repr(self.name),)
+
+class Not(Node):
+ def __init__(self, expr, lineno=None):
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.expr,
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "Not(%s)" % (repr(self.expr),)
+
+class Or(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Or(%s)" % (repr(self.nodes),)
+
+class Pass(Node):
+ def __init__(self, lineno=None):
+ self.lineno = lineno
+
+ def getChildren(self):
+ return ()
+
+ def getChildNodes(self):
+ return ()
+
+ def __repr__(self):
+ return "Pass()"
+
+class Power(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "Power((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Print(Node):
+ def __init__(self, nodes, dest, lineno=None):
+ self.nodes = nodes
+ self.dest = dest
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.extend(flatten(self.nodes))
+ children.append(self.dest)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ if self.dest is not None:
+ nodelist.append(self.dest)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Print(%s, %s)" % (repr(self.nodes), repr(self.dest))
+
+class Printnl(Node):
+ def __init__(self, nodes, dest, lineno=None):
+ self.nodes = nodes
+ self.dest = dest
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.extend(flatten(self.nodes))
+ children.append(self.dest)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ if self.dest is not None:
+ nodelist.append(self.dest)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Printnl(%s, %s)" % (repr(self.nodes), repr(self.dest))
+
+class Raise(Node):
+ def __init__(self, expr1, expr2, expr3, lineno=None):
+ self.expr1 = expr1
+ self.expr2 = expr2
+ self.expr3 = expr3
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.expr1)
+ children.append(self.expr2)
+ children.append(self.expr3)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ if self.expr1 is not None:
+ nodelist.append(self.expr1)
+ if self.expr2 is not None:
+ nodelist.append(self.expr2)
+ if self.expr3 is not None:
+ nodelist.append(self.expr3)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Raise(%s, %s, %s)" % (repr(self.expr1), repr(self.expr2), repr(self.expr3))
+
+class Return(Node):
+ def __init__(self, value, lineno=None):
+ self.value = value
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.value,
+
+ def getChildNodes(self):
+ return self.value,
+
+ def __repr__(self):
+ return "Return(%s)" % (repr(self.value),)
+
+class RightShift(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "RightShift((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Slice(Node):
+ def __init__(self, expr, flags, lower, upper, lineno=None):
+ self.expr = expr
+ self.flags = flags
+ self.lower = lower
+ self.upper = upper
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.expr)
+ children.append(self.flags)
+ children.append(self.lower)
+ children.append(self.upper)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.expr)
+ if self.lower is not None:
+ nodelist.append(self.lower)
+ if self.upper is not None:
+ nodelist.append(self.upper)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Slice(%s, %s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.lower), repr(self.upper))
+
+class Sliceobj(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Sliceobj(%s)" % (repr(self.nodes),)
+
+class Stmt(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Stmt(%s)" % (repr(self.nodes),)
+
+class Sub(Node):
+ def __init__(self, (left, right), lineno=None):
+ self.left = left
+ self.right = right
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.left, self.right
+
+ def getChildNodes(self):
+ return self.left, self.right
+
+ def __repr__(self):
+ return "Sub((%s, %s))" % (repr(self.left), repr(self.right))
+
+class Subscript(Node):
+ def __init__(self, expr, flags, subs, lineno=None):
+ self.expr = expr
+ self.flags = flags
+ self.subs = subs
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.expr)
+ children.append(self.flags)
+ children.extend(flatten(self.subs))
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.expr)
+ nodelist.extend(flatten_nodes(self.subs))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Subscript(%s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.subs))
+
+class TryExcept(Node):
+ def __init__(self, body, handlers, else_, lineno=None):
+ self.body = body
+ self.handlers = handlers
+ self.else_ = else_
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.body)
+ children.extend(flatten(self.handlers))
+ children.append(self.else_)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.body)
+ nodelist.extend(flatten_nodes(self.handlers))
+ if self.else_ is not None:
+ nodelist.append(self.else_)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "TryExcept(%s, %s, %s)" % (repr(self.body), repr(self.handlers), repr(self.else_))
+
+class TryFinally(Node):
+ def __init__(self, body, final, lineno=None):
+ self.body = body
+ self.final = final
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.body, self.final
+
+ def getChildNodes(self):
+ return self.body, self.final
+
+ def __repr__(self):
+ return "TryFinally(%s, %s)" % (repr(self.body), repr(self.final))
+
+class Tuple(Node):
+ def __init__(self, nodes, lineno=None):
+ self.nodes = nodes
+ self.lineno = lineno
+
+ def getChildren(self):
+ return tuple(flatten(self.nodes))
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.extend(flatten_nodes(self.nodes))
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "Tuple(%s)" % (repr(self.nodes),)
+
+class UnaryAdd(Node):
+ def __init__(self, expr, lineno=None):
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.expr,
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "UnaryAdd(%s)" % (repr(self.expr),)
+
+class UnarySub(Node):
+ def __init__(self, expr, lineno=None):
+ self.expr = expr
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.expr,
+
+ def getChildNodes(self):
+ return self.expr,
+
+ def __repr__(self):
+ return "UnarySub(%s)" % (repr(self.expr),)
+
+class While(Node):
+ def __init__(self, test, body, else_, lineno=None):
+ self.test = test
+ self.body = body
+ self.else_ = else_
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.test)
+ children.append(self.body)
+ children.append(self.else_)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.test)
+ nodelist.append(self.body)
+ if self.else_ is not None:
+ nodelist.append(self.else_)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "While(%s, %s, %s)" % (repr(self.test), repr(self.body), repr(self.else_))
+
+class With(Node):
+ def __init__(self, expr, vars, body, lineno=None):
+ self.expr = expr
+ self.vars = vars
+ self.body = body
+ self.lineno = lineno
+
+ def getChildren(self):
+ children = []
+ children.append(self.expr)
+ children.append(self.vars)
+ children.append(self.body)
+ return tuple(children)
+
+ def getChildNodes(self):
+ nodelist = []
+ nodelist.append(self.expr)
+ if self.vars is not None:
+ nodelist.append(self.vars)
+ nodelist.append(self.body)
+ return tuple(nodelist)
+
+ def __repr__(self):
+ return "With(%s, %s, %s)" % (repr(self.expr), repr(self.vars), repr(self.body))
+
+class Yield(Node):
+ def __init__(self, value, lineno=None):
+ self.value = value
+ self.lineno = lineno
+
+ def getChildren(self):
+ return self.value,
+
+ def getChildNodes(self):
+ return self.value,
+
+ def __repr__(self):
+ return "Yield(%s)" % (repr(self.value),)
+
+for name, obj in globals().items():
+ if isinstance(obj, type) and issubclass(obj, Node):
+ nodes[name.lower()] = obj
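+
+# This loop is meant to register every Node subclass in `nodes` under its
+# lowercased class name (e.g. 'add' -> Add). Note, though, that the classes
+# above are old-style, and an old-style class is not an instance of `type`,
+# so on Python 2 the filter matches nothing and `nodes` keeps only the
+# "expression" entry set in the Expression class body.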
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/compiler/consts.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,21 @@
+# operation flags
+OP_ASSIGN = 'OP_ASSIGN'
+OP_DELETE = 'OP_DELETE'
+OP_APPLY = 'OP_APPLY'
+
+SC_LOCAL = 1
+SC_GLOBAL = 2
+SC_FREE = 3
+SC_CELL = 4
+SC_UNKNOWN = 5
+
+CO_OPTIMIZED = 0x0001
+CO_NEWLOCALS = 0x0002
+CO_VARARGS = 0x0004
+CO_VARKEYWORDS = 0x0008
+CO_NESTED = 0x0010
+CO_GENERATOR = 0x0020
+CO_GENERATOR_ALLOWED = 0
+CO_FUTURE_DIVISION = 0x2000
+CO_FUTURE_ABSIMPORT = 0x4000
+CO_FUTURE_WITH_STATEMENT = 0x8000
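+
+# The CO_* values are bit flags combined and tested with bitwise operators,
+# as in ast.Function and ast.Lambda:
+#
+#   >>> flags = CO_VARARGS | CO_VARKEYWORDS
+#   >>> bool(flags & CO_VARARGS)
+#   True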
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/compiler/future.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,73 @@
+"""Parser for future statements
+
+"""
+
+from compiler import ast, walk
+
+def is_future(stmt):
+ """Return true if statement is a well-formed future statement"""
+ if not isinstance(stmt, ast.From):
+ return 0
+ if stmt.modname == "__future__":
+ return 1
+ else:
+ return 0
+
+class FutureParser:
+
+ features = ("nested_scopes", "generators", "division",
+ "absolute_import", "with_statement")
+
+ def __init__(self):
+ self.found = {} # set
+
+ def visitModule(self, node):
+ stmt = node.node
+ for s in stmt.nodes:
+ if not self.check_stmt(s):
+ break
+
+ def check_stmt(self, stmt):
+ if is_future(stmt):
+ for name, asname in stmt.names:
+ if name in self.features:
+ self.found[name] = 1
+ else:
+ raise SyntaxError, \
+ "future feature %s is not defined" % name
+ stmt.valid_future = 1
+ return 1
+ return 0
+
+ def get_features(self):
+ """Return list of features enabled by future statements"""
+ return self.found.keys()
+
+class BadFutureParser:
+ """Check for invalid future statements"""
+
+ def visitFrom(self, node):
+ if hasattr(node, 'valid_future'):
+ return
+ if node.modname != "__future__":
+ return
+ raise SyntaxError, "invalid future statement " + repr(node)
+
+def find_futures(node):
+ p1 = FutureParser()
+ p2 = BadFutureParser()
+ walk(node, p1)
+ walk(node, p2)
+ return p1.get_features()
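+
+# find_futures() is the module's entry point: given a parsed module it walks
+# the tree twice, first collecting valid leading future statements, then
+# rejecting any stragglers, and returns the requested feature names, e.g. a
+# file beginning with "from __future__ import division" yields ['division'].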
+
+if __name__ == "__main__":
+ import sys
+ from compiler import parseFile, walk
+
+ for file in sys.argv[1:]:
+ print file
+ tree = parseFile(file)
+ v = FutureParser()
+ walk(tree, v)
+ print v.found
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/compiler/misc.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,73 @@
+
+def flatten(tup):
+ elts = []
+ for elt in tup:
+ if isinstance(elt, tuple):
+ elts = elts + flatten(elt)
+ else:
+ elts.append(elt)
+ return elts
+
+class Set:
+ def __init__(self):
+ self.elts = {}
+ def __len__(self):
+ return len(self.elts)
+ def __contains__(self, elt):
+ return self.elts.has_key(elt)
+ def add(self, elt):
+ self.elts[elt] = elt
+ def elements(self):
+ return self.elts.keys()
+ def has_elt(self, elt):
+ return self.elts.has_key(elt)
+ def remove(self, elt):
+ del self.elts[elt]
+ def copy(self):
+ c = Set()
+ c.elts.update(self.elts)
+ return c
+
+class Stack:
+ def __init__(self):
+ self.stack = []
+ self.pop = self.stack.pop
+ def __len__(self):
+ return len(self.stack)
+ def push(self, elt):
+ self.stack.append(elt)
+ def top(self):
+ return self.stack[-1]
+ def __getitem__(self, index): # needed by visitContinue()
+ return self.stack[index]
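+
+# Set predates the builtin set type and Stack wraps a plain list; a minimal
+# sketch of both:
+#
+#   >>> s = Set(); s.add('x'); 'x' in s
+#   True
+#   >>> st = Stack(); st.push(1); st.push(2); st.top()
+#   2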
+
+MANGLE_LEN = 256 # magic constant from compile.c
+
+def mangle(name, klass):
+ if not name.startswith('__'):
+ return name
+ if len(name) + 2 >= MANGLE_LEN:
+ return name
+ if name.endswith('__'):
+ return name
+ try:
+ i = 0
+ while klass[i] == '_':
+ i = i + 1
+ except IndexError:
+ return name
+ klass = klass[i:]
+
+ tlen = len(klass) + len(name)
+ if tlen > MANGLE_LEN:
+ klass = klass[:MANGLE_LEN-tlen]
+
+ return "_%s%s" % (klass, name)
+
+def set_filename(filename, tree):
+ """Set the filename attribute to filename on every node in tree"""
+ worklist = [tree]
+ while worklist:
+ node = worklist.pop(0)
+ node.filename = filename
+ worklist.extend(node.getChildNodes())
--- /dev/null Thu Apr 7 21:41:36 2022
+++ b/sys/lib/python/compiler/pyassem.py Tue Jun 8 19:10:51 2021
@@ -0,0 +1,818 @@
+"""A flow graph representation for Python bytecode"""
+
+import dis
+import new
+import sys
+
+from compiler import misc
+from compiler.consts \
+ import CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS
+
+class FlowGraph:
+ def __init__(self):
+ self.current = self.entry = Block()
+ self.exit = Block("exit")
+ self.blocks = misc.Set()
+ self.blocks.add(self.entry)
+ self.blocks.add(self.exit)
+
+ def startBlock(self, block):
+ if self._debug:
+ if self.current:
+ print "end", repr(self.current)
+ print " next", self.current.next
+ print " ", self.current.get_children()
+ print repr(block)
+ self.current = block
+
+ def nextBlock(self, block=None):
+ # XXX think we need to specify when there is implicit transfer
+ # from one block to the next. might be better to represent this
+ # with explicit JUM