# HG changeset patch
# User Atul Varma
# Date 1246295973 25200
# Node ID 3c2151124ceed37ac419a0c3cbf81163b434ce0f
# Parent  74b7ad049542cd9f93e0b38b134aec422a5b60cf
Converted pavement.py to manage.py and added a README.

diff -r 74b7ad049542 -r 3c2151124cee .hgignore
--- a/.hgignore	Mon Jun 29 07:28:56 2009 -0700
+++ b/.hgignore	Mon Jun 29 10:19:33 2009 -0700
@@ -1,2 +1,3 @@
 syntax: glob
 *.so
+*.pyc
diff -r 74b7ad049542 -r 3c2151124cee README
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/README	Mon Jun 29 10:19:33 2009 -0700
@@ -0,0 +1,94 @@
+Pymonkey README
+---------------
+
+Pymonkey is a pure Python C API module to expose the Mozilla
+SpiderMonkey engine to Python.
+
+Rationale and Goals:
+
+  * There's an increasing need to be able to run JS on the server
+    side--particularly untrusted JS. There are Java-based solutions
+    like Rhino out there, but nothing really mature is available for
+    the Python world. Ideally, Pymonkey should enable a Python
+    programmer to create a custom sandboxed environment for executing
+    JS code without needing to write any C.
+
+  * Pymonkey should have awesome Sphinx documentation with doctests
+    and all the trappings of a model Python package. Not only should
+    it be easy for Python programmers to learn how to use the module,
+    but it should also be easy for them to learn more about how
+    SpiderMonkey works by reading the docs and playing around with the
+    code.
+
+  * Pymonkey needs to have outstanding developer ergonomics. Full
+    cross-language stack tracebacks should be available, for instance,
+    and developers should be able to easily debug. Access to memory
+    profiling facilities in JS-land is a must.
+
+  * The module uses the Python C API: no SWIG, Pyrex, or other
+    intermediaries. The obvious disadvantage here is that it means
+    more C code, but the advantages are that
+
+    (A) contributors don't need to learn anything other than the
+        Python and SpiderMonkey C APIs to contribute, and
+
+    (B) it means one less dependency, which makes the build process
+        easier.
+
+    The module also doesn't use ctypes because using the SpiderMonkey
+    C API requires fairly complex preprocessor macros defined in the
+    engine's header files.
+
+    Finally, Atul has never really made a straight Python C API module
+    before, so he wanted to give it a try.
+
+Building and Testing
+--------------------
+
+Right now building is annoying and difficult because Pymonkey wraps
+SpiderMonkey 1.8.1, which doesn't yet exist as standalone code--it's
+only available in the mozilla-central HG repository. As such,
+Pymonkey currently requires a full build of the Mozilla platform. You
+can find out how to do this here:
+
+  https://developer.mozilla.org/en/Build_Documentation
+
+Once you've built Mozilla, you can build the extension and run the
+tests like this:
+
+  python manage.py build --objdir=PATH_TO_OBJDIR
+
+Where PATH_TO_OBJDIR is the path to your Mozilla build's objdir (if
+you don't know what that is, read the build documentation).
+
+Note that at the moment, the build script is only tested on OS X, and
+even then some things need to be done to the environment in order for
+pymonkey to be loaded properly; look at manage.py if you need more
+specifics on that. Right now this isn't a huge deal because we're only
+really concerned with the test suite, which is run automatically after
+building--but obviously it's something that needs to be fixed in the
+future.
+
+Example Code
+------------
+
+Right now the only example code that exists is in the test suite at
+test_pymonkey.py. Check it out and feel free to add more.
+
+Challenges
+----------
+
+There are a number of challenges that need to be resolved before
+pymonkey can be really usable. Here are some of them.
+
+Garbage Collection
+
+Python's garbage collection uses reference counting, whereas
+SpiderMonkey's is mark-and-sweep. We'll likely run into situations
+where there are cycles that exist between SpiderMonkey and Python
+objects; this is actually quite similar to the relationship between
+XPCOM and JavaScript in the Mozilla platform--XPCOM uses reference
+counting too--so detecting such cycles will probably involve creating
+something akin to XPCOM's cycle collector [1].
+
+[1] https://developer.mozilla.org/en/Interfacing_with_the_XPCOM_cycle_collector
diff -r 74b7ad049542 -r 3c2151124cee manage.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/manage.py	Mon Jun 29 10:19:33 2009 -0700
@@ -0,0 +1,81 @@
+#! /usr/bin/env python
+
+import os
+import sys
+
+if __name__ == '__main__':
+    # This code is run if we're executed directly from the command-line.
+
+    myfile = os.path.abspath(__file__)
+    mydir = os.path.dirname(myfile)
+    sys.path.insert(0, os.path.join(mydir, 'python-modules'))
+
+    args = sys.argv[1:]
+    if not args:
+        args = ['help']
+
+    # Have paver run this very file as its pavement script.
+    args = ['-f', myfile] + args
+
+    import paver.tasks
+    paver.tasks.main(args)
+    sys.exit(0)
+
+# This code is run if we're executed as a pavement script by paver.
+
+import os
+import subprocess
+import shutil
+import sys
+
+from paver.easy import *
+
+@task
+@cmdopts([("objdir=", "o", "The root of your Mozilla objdir")])
+def build(options):
+    """Build the pymonkey Python C extension."""
+
+    objdir = options.get("objdir")
+    if not objdir:
+        print("Objdir not specified! Please specify one with "
+              "the --objdir option.")
+        sys.exit(1)
+    objdir = os.path.abspath(objdir)
+    incdir = os.path.join(objdir, "dist", "include")
+    libdir = os.path.join(objdir, "dist", "lib")
+
+    print "Building extension."
+
+    result = subprocess.call(
+        ["g++",
+         "-framework", "Python",
+         "-I%s" % incdir,
+         "-L%s" % libdir,
+         "-lmozjs",
+         "-o", "pymonkey.so",
+         "-dynamiclib",
+         "pymonkey.c",
+         "utils.c",
+         "object.c",
+         "undefined.c",
+         "context.c",
+         "runtime.c"]
+        )
+
+    if result:
+        sys.exit(result)
+
+    print "Running test suite."
+
+    new_env = {}
+    new_env.update(os.environ)
+    new_env['DYLD_LIBRARY_PATH'] = libdir
+
+    result = subprocess.call(
+        [sys.executable,
+         "test_pymonkey.py"],
+        env = new_env
+        )
+
+    if result:
+        sys.exit(result)
diff -r 74b7ad049542 -r 3c2151124cee pavement.py
--- a/pavement.py	Mon Jun 29 07:28:56 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-import os
-import subprocess
-import shutil
-import sys
-
-from paver.easy import *
-
-@task
-def auto(options):
-    objdir = os.path.join("..", "mozilla-stuff", "basic-firefox")
-    objdir = os.path.abspath(objdir)
-    incdir = os.path.join(objdir, "dist", "include")
-    libdir = os.path.join(objdir, "dist", "lib")
-
-    print "Building extension."
-
-    result = subprocess.call(
-        ["g++",
-         "-framework", "Python",
-         "-I%s" % incdir,
-         "-L%s" % libdir,
-         "-lmozjs",
-         "-o", "pymonkey.so",
-         "-dynamiclib",
-         "pymonkey.c",
-         "utils.c",
-         "object.c",
-         "undefined.c",
-         "context.c",
-         "runtime.c"]
-        )
-
-    if result:
-        sys.exit(result)
-
-    print "Running test suite."
- - new_env = {} - new_env.update(os.environ) - new_env['DYLD_LIBRARY_PATH'] = libdir - - result = subprocess.call( - [sys.executable, - "test_pymonkey.py"], - env = new_env - ) - - if result: - sys.exit(result) diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/__init__.py diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/defaults.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/defaults.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,20 @@ +"""The namespace for the pavement to run in, also imports default tasks.""" + +import warnings + +warnings.warn("""paver.defaults is deprecated. Import from paver.easy instead. +Note that you will need to add additional declarations for exactly +equivalent behavior. Specifically: + +from paver.easy import * +import paver.misctasks +from paver import setuputils + +setuputils.install_distutils_tasks() +""", DeprecationWarning, 2) + +from paver.easy import * +from paver.misctasks import * +from paver import setuputils + +setuputils.install_distutils_tasks() diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/easy.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/easy.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,106 @@ +import subprocess +import sys + +from paver import tasks +from paver.options import Bunch + +def dry(message, func, *args, **kw): + """Wraps a function that performs a destructive operation, so that + nothing will happen when a dry run is requested. + + Runs func with the given arguments and keyword arguments. If this + is a dry run, print the message rather than running the function.""" + info(message) + if tasks.environment.dry_run: + return + return func(*args, **kw) + +def error(message, *args): + """Displays an error message to the user.""" + tasks.environment.error(message, *args) + +def info(message, *args): + """Displays a message to the user. If the quiet option is specified, the + message will not be displayed.""" + tasks.environment.info(message, *args) + +def debug(message, *args): + """Displays a message to the user, but only if the verbose flag is + set.""" + tasks.environment.debug(message, *args) + +def sh(command, capture=False, ignore_error=False, cwd=None): + """Runs an external command. If capture is True, the output of the + command will be captured and returned as a string. If the command + has a non-zero return code raise a BuildFailure. You can pass + ignore_error=True to allow non-zero return codes to be allowed to + pass silently, silently into the night. If you pass cwd='some/path' + paver will chdir to 'some/path' before exectuting the command. 
+ + If the dry_run option is True, the command will not + actually be run.""" + def runpipe(): + kwargs = { 'shell': True, 'stderr': subprocess.PIPE, 'cwd': cwd} + if capture: + kwargs['stdout'] = subprocess.PIPE + p = subprocess.Popen(command, **kwargs) + p.wait() + if p.returncode and not ignore_error: + error(p.stderr.read()) + raise BuildFailure("Subprocess return code: %d" % p.returncode) + + if capture: + return p.stdout.read() + + return dry(command, runpipe) + + +class _SimpleProxy(object): + __initialized = False + def __init__(self, rootobj, name): + self.__rootobj = rootobj + self.__name = name + self.__initialized = True + + def __get_object(self): + return getattr(self.__rootobj, self.__name) + + def __getattr__(self, attr): + return getattr(self.__get_object(), attr) + + def __setattr__(self, attr, value): + if self.__initialized: + setattr(self.__get_object(), attr, value) + else: + super(_SimpleProxy, self).__setattr__(attr, value) + + def __call__(self, *args, **kw): + return self.__get_object()(*args, **kw) + + def __str__(self): + return str(self.__get_object()) + + def __repr__(self): + return repr(self.__get_object()) + +environment = _SimpleProxy(tasks, "environment") +options = _SimpleProxy(environment, "options") +call_task = _SimpleProxy(environment, "call_task") + +call_pavement = tasks.call_pavement +task = tasks.task +needs = tasks.needs +cmdopts = tasks.cmdopts +consume_args = tasks.consume_args +no_auto = tasks.no_auto +BuildFailure = tasks.BuildFailure +PavementError = tasks.PavementError + +# these are down here to avoid circular dependencies. Ideally, nothing would +# be using paver.easy other than pavements. +if sys.version_info > (2,5): + from paver.path25 import path, pushd +else: + from paver.path import path + +import paver.misctasks diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/misctasks.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/misctasks.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,67 @@ +"""Miscellaneous tasks that don't fit into one of the other groupings.""" +import os + +from paver.easy import dry, path, task + +_docsdir = os.path.join(os.path.dirname(__file__), "docs") +if os.path.exists(_docsdir): + @task + def paverdocs(): + """Open your web browser and display Paver's documentation.""" + import webbrowser + webbrowser.open("file://" + + (os.path.join(os.path.abspath(_docsdir), 'index.html'))) + +@task +def minilib(options): + """Create a Paver mini library that contains enough for a simple + pavement.py to be installed using a generated setup.py. This + is a good temporary measure until more people have deployed paver. + The output file is 'paver-minilib.zip' in the current directory. + + Options: + + extra_files + list of other paver modules to include (don't include the .py + extension). 
By default, the following modules are included: + defaults, path, release, setuputils, misctasks, options, + tasks, easy + """ + import paver + paverdir = path(paver.__file__).dirname() + filelist = ['__init__', 'defaults', 'path', 'path25', 'release', + 'setuputils', "misctasks", "options", "tasks", "easy"] + filelist.extend(options.get('extra_files', [])) + output_file = 'paver-minilib.zip' + + def generate_zip(): + import zipfile + destfile = zipfile.ZipFile(output_file, "w", zipfile.ZIP_DEFLATED) + for filename in filelist: + destfile.write( + paverdir / (filename + ".py"), + "paver/" + (filename + ".py")) + destfile.close() + dry("Generate %s" % output_file, generate_zip) + +@task +def generate_setup(): + """Generates a setup.py file that uses paver behind the scenes. This + setup.py file will look in the directory that the user is running it + in for a paver-minilib.zip and will add that to sys.path if available. + Otherwise, it will just assume that paver is available.""" + from paver.easy import dry + def write_setup(): + setup = open("setup.py", "w") + setup.write("""import os +if os.path.exists("paver-minilib.zip"): + import sys + sys.path.insert(0, "paver-minilib.zip") + +import paver.tasks +paver.tasks.main() +""") + setup.close() + + dry("Write setup.py", write_setup) + \ No newline at end of file diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/options.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/options.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,171 @@ +class OptionsError(Exception): + pass + +class Bunch(dict): + """A dictionary that provides attribute-style access.""" + + def __repr__(self): + keys = self.keys() + keys.sort() + args = ', '.join(['%s=%r' % (key, self[key]) for key in keys]) + return '%s(%s)' % (self.__class__.__name__, args) + + def __getitem__(self, key): + item = dict.__getitem__(self, key) + if callable(item): + return item() + return item + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + __setattr__ = dict.__setitem__ + + def __delattr__(self, name): + try: + del self[name] + except KeyError: + raise AttributeError(name) + +class Namespace(Bunch): + """A Bunch that will search dictionaries contained within to find a value. + The search order is set via the order() method. See the order method for + more information about search order. + """ + def __init__(self, d=None, **kw): + self._sections = [] + self._ordering = None + self.update(d, **kw) + + def order(self, *keys, **kw): + """Set the search order for this namespace. The arguments + should be the list of keys in the order you wish to search, + or a dictionary/Bunch that you want to search. + Keys that are left out will not be searched. If you pass in + no arguments, then the default ordering will be used. (The default + is to search the global space first, then in the order in + which the sections were created.) + + If you pass in a key name that is not a section, that + key will be silently removed from the list. + + Keyword arguments are: + + add_rest=False + put the sections you list at the front of the search + and add the remaining sections to the end + """ + if not keys: + self._ordering = None + return + + order = [] + for item in keys: + if isinstance(item, dict) or item in self._sections: + order.append(item) + + if kw.get('add_rest'): + # this is not efficient. do we care? probably not. 
+ for item in self._sections: + if item not in order: + order.append(item) + self._ordering = order + + def clear(self): + self._ordering = None + self._sections = [] + super(Namespace, self).clear() + + def setdotted(self, key, value): + """Sets a namespace key, value pair where the key + can use dotted notation to set sub-values. For example, + the key "foo.bar" will set the "bar" value in the "foo" + Bunch in this Namespace. If foo does not exist, it is created + as a Bunch. If foo is a value, an OptionsError will be + raised.""" + segments = key.split(".") + obj = self + segment = segments.pop(0) + while segments: + if segment not in obj: + obj[segment] = Bunch() + obj = obj[segment] + if not isinstance(obj, dict): + raise OptionsError("In setting option '%s', %s was already a value" + % (key, segment)) + segment = segments.pop(0) + obj[segment] = value + + def __setitem__(self, key, value): + if isinstance(value, dict): + self._sections.insert(0, key) + super(Namespace, self).__setitem__(key, value) + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def __getitem__(self, key): + order = self._ordering + if order is None: + order = self._sections + try: + return super(Namespace, self).__getitem__(key) + except KeyError: + pass + for section in order: + if isinstance(section, dict): + try: + return section[key] + except KeyError: + pass + else: + try: + return self[section][key] + except KeyError: + pass + raise KeyError("Key %s not found in namespace" % key) + + def __setattr__(self, key, value): + if key.startswith("_"): + object.__setattr__(self, key, value) + else: + self[key] = value + + def __delitem__(self, key): + try: + index = self._sections.index(key) + del self._sections[index] + except ValueError: + pass + super(Namespace, self).__delitem__(key) + + def update(self, d=None, **kw): + """Update the namespace. This is less efficient than the standard + dict.update but is necessary to keep track of the sections that we'll be + searching.""" + items = [] + if d: + # look up keys even though we call items + # because that's what the dict.update + # doc says + if hasattr(d, 'keys'): + items.extend(list(d.items())) + else: + items.extend(list(d)) + items.extend(list(kw.items())) + for key, value in items: + self[key] = value + + __call__ = update + + def setdefault(self, key, default): + if not key in self: + self[key] = default + return default + return self[key] diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/path.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/path.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,999 @@ +""" path.py - An object representing a path to a file or directory. + +Example:: + + from path import path + d = path('/home/guido/bin') + for f in d.files('*.py'): + f.chmod(0755) + +This module requires Python 2.2 or later. + + +:URL: http://www.jorendorff.com/articles/python/path +:Author: Jason Orendorff (and others - see the url!) +:Date: 9 Mar 2007 + +This has been modified from the original to avoid dry run issues. +""" + + +# TODO +# - Tree-walking functions don't avoid symlink loops. Matt Harrison +# sent me a patch for this. +# - Bug in write_text(). It doesn't support Universal newline mode. +# - Better error message in listdir() when self isn't a +# directory. (On Windows, the error message really sucks.) +# - Make sure everything has a good docstring. +# - Add methods for regex find and replace. +# - guess_content_type() method? 
+# - Perhaps support arguments to touch(). + +import sys, warnings, os, fnmatch, glob, shutil, codecs + +try: + from hashlib import md5 +except ImportError: + # compatibility for versions before 2.5 + import md5 + md5 = md5.new + +__version__ = '2.2' +__all__ = ['path'] + +# Platform-specific support for path.owner +if os.name == 'nt': + try: + import win32security + except ImportError: + win32security = None +else: + try: + import pwd + except ImportError: + pwd = None + +# Pre-2.3 support. Are unicode filenames supported? +_base = str +_getcwd = os.getcwd +try: + if os.path.supports_unicode_filenames: + _base = unicode + _getcwd = os.getcwdu +except AttributeError: + pass + +# Pre-2.3 workaround for booleans +try: + True, False +except NameError: + True, False = 1, 0 + +# Pre-2.3 workaround for basestring. +try: + basestring +except NameError: + basestring = (str, unicode) + +# Universal newline support +_textmode = 'r' +if hasattr(file, 'newlines'): + _textmode = 'U' + + +class TreeWalkWarning(Warning): + pass + +class path(_base): + """ Represents a filesystem path. + + For documentation on individual methods, consult their + counterparts in os.path. + """ + + # --- Special Python methods. + + def __repr__(self): + return 'path(%s)' % _base.__repr__(self) + + # Adding a path and a string yields a path. + def __add__(self, more): + try: + resultStr = _base.__add__(self, more) + except TypeError: #Python bug + resultStr = NotImplemented + if resultStr is NotImplemented: + return resultStr + return self.__class__(resultStr) + + def __radd__(self, other): + if isinstance(other, basestring): + return self.__class__(other.__add__(self)) + else: + return NotImplemented + + # The / operator joins paths. + def __div__(self, rel): + """ fp.__div__(rel) == fp / rel == fp.joinpath(rel) + + Join two path components, adding a separator character if + needed. + """ + return self.__class__(os.path.join(self, rel)) + + # Make the / operator work even when true division is enabled. + __truediv__ = __div__ + + def getcwd(cls): + """ Return the current working directory as a path object. """ + return cls(_getcwd()) + getcwd = classmethod(getcwd) + + def chdir(self): + """Change current directory.""" + os.chdir(self) + + + # --- Operations on path strings. + + isabs = os.path.isabs + def abspath(self): return self.__class__(os.path.abspath(self)) + def normcase(self): return self.__class__(os.path.normcase(self)) + def normpath(self): return self.__class__(os.path.normpath(self)) + def realpath(self): return self.__class__(os.path.realpath(self)) + def expanduser(self): return self.__class__(os.path.expanduser(self)) + def expandvars(self): return self.__class__(os.path.expandvars(self)) + def dirname(self): return self.__class__(os.path.dirname(self)) + basename = os.path.basename + + def expand(self): + """ Clean up a filename by calling expandvars(), + expanduser(), and normpath() on it. + + This is commonly everything needed to clean up a filename + read from a configuration file, for example. + """ + return self.expandvars().expanduser().normpath() + + def _get_namebase(self): + base, ext = os.path.splitext(self.name) + return base + + def _get_ext(self): + f, ext = os.path.splitext(_base(self)) + return ext + + def _get_drive(self): + drive, r = os.path.splitdrive(self) + return self.__class__(drive) + + parent = property( + dirname, None, None, + """ This path's parent directory, as a new path object. 
+ + For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib') + """) + + name = property( + basename, None, None, + """ The name of this file or directory without the full path. + + For example, path('/usr/local/lib/libpython.so').name == 'libpython.so' + """) + + namebase = property( + _get_namebase, None, None, + """ The same as path.name, but with one file extension stripped off. + + For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz', + but path('/home/guido/python.tar.gz').namebase == 'python.tar' + """) + + ext = property( + _get_ext, None, None, + """ The file extension, for example '.py'. """) + + drive = property( + _get_drive, None, None, + """ The drive specifier, for example 'C:'. + This is always empty on systems that don't use drive specifiers. + """) + + def splitpath(self): + """ p.splitpath() -> Return (p.parent, p.name). """ + parent, child = os.path.split(self) + return self.__class__(parent), child + + def splitdrive(self): + """ p.splitdrive() -> Return (p.drive, ). + + Split the drive specifier from this path. If there is + no drive specifier, p.drive is empty, so the return value + is simply (path(''), p). This is always the case on Unix. + """ + drive, rel = os.path.splitdrive(self) + return self.__class__(drive), rel + + def splitext(self): + """ p.splitext() -> Return (p.stripext(), p.ext). + + Split the filename extension from this path and return + the two parts. Either part may be empty. + + The extension is everything from '.' to the end of the + last path segment. This has the property that if + (a, b) == p.splitext(), then a + b == p. + """ + filename, ext = os.path.splitext(self) + return self.__class__(filename), ext + + def stripext(self): + """ p.stripext() -> Remove one file extension from the path. + + For example, path('/home/guido/python.tar.gz').stripext() + returns path('/home/guido/python.tar'). + """ + return self.splitext()[0] + + if hasattr(os.path, 'splitunc'): + def splitunc(self): + unc, rest = os.path.splitunc(self) + return self.__class__(unc), rest + + def _get_uncshare(self): + unc, r = os.path.splitunc(self) + return self.__class__(unc) + + uncshare = property( + _get_uncshare, None, None, + """ The UNC mount point for this path. + This is empty for paths on local drives. """) + + def joinpath(self, *args): + """ Join two or more path components, adding a separator + character (os.sep) if needed. Returns a new path + object. + """ + return self.__class__(os.path.join(self, *args)) + + def splitall(self): + r""" Return a list of the path components in this path. + + The first item in the list will be a path. Its value will be + either os.curdir, os.pardir, empty, or the root directory of + this path (for example, '/' or 'C:\\'). The other items in + the list will be strings. + + ``path.path.joinpath(*result)`` will yield the original path. + """ + parts = [] + loc = self + while loc != os.curdir and loc != os.pardir: + prev = loc + loc, child = prev.splitpath() + if loc == prev: + break + parts.append(child) + parts.append(loc) + parts.reverse() + return parts + + def relpath(self): + """ Return this path as a relative path, + based from the current working directory. + """ + cwd = self.__class__(os.getcwd()) + return cwd.relpathto(self) + + def relpathto(self, dest): + """ Return a relative path from self to dest. + + If there is no relative path from self to dest, for example if + they reside on different drives in Windows, then this returns + dest.abspath(). 
+ """ + origin = self.abspath() + dest = self.__class__(dest).abspath() + + orig_list = origin.normcase().splitall() + # Don't normcase dest! We want to preserve the case. + dest_list = dest.splitall() + + if orig_list[0] != os.path.normcase(dest_list[0]): + # Can't get here from there. + return dest + + # Find the location where the two paths start to differ. + i = 0 + for start_seg, dest_seg in zip(orig_list, dest_list): + if start_seg != os.path.normcase(dest_seg): + break + i += 1 + + # Now i is the point where the two paths diverge. + # Need a certain number of "os.pardir"s to work up + # from the origin to the point of divergence. + segments = [os.pardir] * (len(orig_list) - i) + # Need to add the diverging part of dest_list. + segments += dest_list[i:] + if len(segments) == 0: + # If they happen to be identical, use os.curdir. + relpath = os.curdir + else: + relpath = os.path.join(*segments) + return self.__class__(relpath) + + # --- Listing, searching, walking, and matching + + def listdir(self, pattern=None): + """ D.listdir() -> List of items in this directory. + + Use D.files() or D.dirs() instead if you want a listing + of just files or just subdirectories. + + The elements of the list are path objects. + + With the optional 'pattern' argument, this only lists + items whose names match the given pattern. + """ + names = os.listdir(self) + if pattern is not None: + names = fnmatch.filter(names, pattern) + return [self / child for child in names] + + def dirs(self, pattern=None): + """ D.dirs() -> List of this directory's subdirectories. + + The elements of the list are path objects. + This does not walk recursively into subdirectories + (but see path.walkdirs). + + With the optional 'pattern' argument, this only lists + directories whose names match the given pattern. For + example:: + d.dirs('build-*') + """ + return [p for p in self.listdir(pattern) if p.isdir()] + + def files(self, pattern=None): + """ D.files() -> List of the files in this directory. + + The elements of the list are path objects. + This does not walk into subdirectories (see path.walkfiles). + + With the optional 'pattern' argument, this only lists files + whose names match the given pattern. For example:: + d.files('*.pyc') + """ + + return [p for p in self.listdir(pattern) if p.isfile()] + + def walk(self, pattern=None, errors='strict'): + """ D.walk() -> iterator over files and subdirs, recursively. + + The iterator yields path objects naming each child item of + this directory and its descendants. This requires that + D.isdir(). + + This performs a depth-first traversal of the directory tree. + Each directory is returned just before all its children. + + The errors= keyword argument controls behavior when an + error occurs. The default is 'strict', which causes an + exception. The other allowed values are 'warn', which + reports the error via warnings.warn(), and 'ignore'. 
+ """ + if errors not in ('strict', 'warn', 'ignore'): + raise ValueError("invalid errors parameter") + + try: + childList = self.listdir() + except Exception: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to list directory '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + return + else: + raise + + for child in childList: + if pattern is None or child.fnmatch(pattern): + yield child + try: + isdir = child.isdir() + except Exception: + if errors == 'ignore': + isdir = False + elif errors == 'warn': + warnings.warn( + "Unable to access '%s': %s" + % (child, sys.exc_info()[1]), + TreeWalkWarning) + isdir = False + else: + raise + + if isdir: + for item in child.walk(pattern, errors): + yield item + + def walkdirs(self, pattern=None, errors='strict'): + """ D.walkdirs() -> iterator over subdirs, recursively. + + With the optional 'pattern' argument, this yields only + directories whose names match the given pattern. For + example, ``mydir.walkdirs('*test')`` yields only directories + with names ending in 'test'. + + The errors= keyword argument controls behavior when an + error occurs. The default is 'strict', which causes an + exception. The other allowed values are 'warn', which + reports the error via warnings.warn(), and 'ignore'. + """ + if errors not in ('strict', 'warn', 'ignore'): + raise ValueError("invalid errors parameter") + + try: + dirs = self.dirs() + except Exception: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to list directory '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + return + else: + raise + + for child in dirs: + if pattern is None or child.fnmatch(pattern): + yield child + for subsubdir in child.walkdirs(pattern, errors): + yield subsubdir + + def walkfiles(self, pattern=None, errors='strict'): + """ D.walkfiles() -> iterator over files in D, recursively. + + The optional argument, pattern, limits the results to files + with names that match the pattern. For example, + ``mydir.walkfiles('*.tmp')`` yields only files with the .tmp + extension. + """ + if errors not in ('strict', 'warn', 'ignore'): + raise ValueError("invalid errors parameter") + + try: + childList = self.listdir() + except Exception: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to list directory '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + return + else: + raise + + for child in childList: + try: + isfile = child.isfile() + isdir = not isfile and child.isdir() + except: + if errors == 'ignore': + continue + elif errors == 'warn': + warnings.warn( + "Unable to access '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + continue + else: + raise + + if isfile: + if pattern is None or child.fnmatch(pattern): + yield child + elif isdir: + for f in child.walkfiles(pattern, errors): + yield f + + def fnmatch(self, pattern): + """ Return True if self.name matches the given pattern. + + pattern - A filename pattern with wildcards, + for example ``'*.py'``. + """ + return fnmatch.fnmatch(self.name, pattern) + + def glob(self, pattern): + """ Return a list of path objects that match the pattern. + + pattern - a path relative to this directory, with wildcards. + + For example, path('/users').glob('*/bin/*') returns a list + of all the files users have in their bin directories. + """ + cls = self.__class__ + return [cls(s) for s in glob.glob(_base(self / pattern))] + + + # --- Reading or writing an entire file at once. 
+ + # TODO: file writing should not occur during dry runs XXX + def open(self, mode='r'): + """ Open this file. Return a file object. """ + return file(self, mode) + + def bytes(self): + """ Open this file, read all bytes, return them as a string. """ + f = self.open('rb') + try: + return f.read() + finally: + f.close() + + def write_bytes(self, bytes, append=False): + """ Open this file and write the given bytes to it. + + Default behavior is to overwrite any existing file. + Call p.write_bytes(bytes, append=True) to append instead. + """ + if append: + mode = 'ab' + else: + mode = 'wb' + f = self.open(mode) + try: + f.write(bytes) + finally: + f.close() + + def text(self, encoding=None, errors='strict'): + r""" Open this file, read it in, return the content as a string. + + This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r' + are automatically translated to '\n'. + + Optional arguments: + + encoding - The Unicode encoding (or character set) of + the file. If present, the content of the file is + decoded and returned as a unicode object; otherwise + it is returned as an 8-bit str. + errors - How to handle Unicode errors; see help(str.decode) + for the options. Default is 'strict'. + """ + if encoding is None: + # 8-bit + f = self.open(_textmode) + try: + return f.read() + finally: + f.close() + else: + # Unicode + f = codecs.open(self, 'r', encoding, errors) + # (Note - Can't use 'U' mode here, since codecs.open + # doesn't support 'U' mode, even in Python 2.3.) + try: + t = f.read() + finally: + f.close() + return (t.replace(u'\r\n', u'\n') + .replace(u'\r\x85', u'\n') + .replace(u'\r', u'\n') + .replace(u'\x85', u'\n') + .replace(u'\u2028', u'\n')) + + def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False): + r""" Write the given text to this file. + + The default behavior is to overwrite any existing file; + to append instead, use the 'append=True' keyword argument. + + There are two differences between path.write_text() and + path.write_bytes(): newline handling and Unicode handling. + See below. + + Parameters: + + - text - str/unicode - The text to be written. + + - encoding - str - The Unicode encoding that will be used. + This is ignored if 'text' isn't a Unicode string. + + - errors - str - How to handle Unicode encoding errors. + Default is 'strict'. See help(unicode.encode) for the + options. This is ignored if 'text' isn't a Unicode + string. + + - linesep - keyword argument - str/unicode - The sequence of + characters to be used to mark end-of-line. The default is + os.linesep. You can also specify None; this means to + leave all newlines as they are in 'text'. + + - append - keyword argument - bool - Specifies what to do if + the file already exists (True: append to the end of it; + False: overwrite it.) The default is False. + + + --- Newline handling. + + write_text() converts all standard end-of-line sequences + ('\n', '\r', and '\r\n') to your platform's default end-of-line + sequence (see os.linesep; on Windows, for example, the + end-of-line marker is '\r\n'). + + If you don't like your platform's default, you can override it + using the 'linesep=' keyword argument. If you specifically want + write_text() to preserve the newlines as-is, use 'linesep=None'. + + This applies to Unicode text the same as to 8-bit text, except + there are three additional standard Unicode end-of-line sequences: + u'\x85', u'\r\x85', and u'\u2028'. 
+ + (This is slightly different from when you open a file for + writing with fopen(filename, "w") in C or file(filename, 'w') + in Python.) + + + --- Unicode + + If 'text' isn't Unicode, then apart from newline handling, the + bytes are written verbatim to the file. The 'encoding' and + 'errors' arguments are not used and must be omitted. + + If 'text' is Unicode, it is first converted to bytes using the + specified 'encoding' (or the default encoding if 'encoding' + isn't specified). The 'errors' argument applies only to this + conversion. + + """ + if isinstance(text, unicode): + if linesep is not None: + # Convert all standard end-of-line sequences to + # ordinary newline characters. + text = (text.replace(u'\r\n', u'\n') + .replace(u'\r\x85', u'\n') + .replace(u'\r', u'\n') + .replace(u'\x85', u'\n') + .replace(u'\u2028', u'\n')) + text = text.replace(u'\n', linesep) + if encoding is None: + encoding = sys.getdefaultencoding() + bytes = text.encode(encoding, errors) + else: + # It is an error to specify an encoding if 'text' is + # an 8-bit string. + assert encoding is None + + if linesep is not None: + text = (text.replace('\r\n', '\n') + .replace('\r', '\n')) + bytes = text.replace('\n', linesep) + + self.write_bytes(bytes, append) + + def lines(self, encoding=None, errors='strict', retain=True): + r""" Open this file, read all lines, return them in a list. + + Optional arguments: + encoding - The Unicode encoding (or character set) of + the file. The default is None, meaning the content + of the file is read as 8-bit characters and returned + as a list of (non-Unicode) str objects. + errors - How to handle Unicode errors; see help(str.decode) + for the options. Default is 'strict' + retain - If true, retain newline characters; but all newline + character combinations ('\r', '\n', '\r\n') are + translated to '\n'. If false, newline characters are + stripped off. Default is True. + + This uses 'U' mode in Python 2.3 and later. + """ + if encoding is None and retain: + f = self.open(_textmode) + try: + return f.readlines() + finally: + f.close() + else: + return self.text(encoding, errors).splitlines(retain) + + def write_lines(self, lines, encoding=None, errors='strict', + linesep=os.linesep, append=False): + r""" Write the given lines of text to this file. + + By default this overwrites any existing file at this path. + + This puts a platform-specific newline sequence on every line. + See 'linesep' below. + + lines - A list of strings. + + encoding - A Unicode encoding to use. This applies only if + 'lines' contains any Unicode strings. + + errors - How to handle errors in Unicode encoding. This + also applies only to Unicode strings. + + linesep - The desired line-ending. This line-ending is + applied to every line. If a line already has any + standard line ending ('\r', '\n', '\r\n', u'\x85', + u'\r\x85', u'\u2028'), that will be stripped off and + this will be used instead. The default is os.linesep, + which is platform-dependent ('\r\n' on Windows, '\n' on + Unix, etc.) Specify None to write the lines as-is, + like file.writelines(). + + Use the keyword argument append=True to append lines to the + file. The default is to overwrite the file. Warning: + When you use this with Unicode data, if the encoding of the + existing data in the file is different from the encoding + you specify with the encoding= parameter, the result is + mixed-encoding data, which can really confuse someone trying + to read the file later. 
+ """ + if append: + mode = 'ab' + else: + mode = 'wb' + f = self.open(mode) + try: + for line in lines: + isUnicode = isinstance(line, unicode) + if linesep is not None: + # Strip off any existing line-end and add the + # specified linesep string. + if isUnicode: + if line[-2:] in (u'\r\n', u'\x0d\x85'): + line = line[:-2] + elif line[-1:] in (u'\r', u'\n', + u'\x85', u'\u2028'): + line = line[:-1] + else: + if line[-2:] == '\r\n': + line = line[:-2] + elif line[-1:] in ('\r', '\n'): + line = line[:-1] + line += linesep + if isUnicode: + if encoding is None: + encoding = sys.getdefaultencoding() + line = line.encode(encoding, errors) + f.write(line) + finally: + f.close() + + def read_md5(self): + """ Calculate the md5 hash for this file. + + This reads through the entire file. + """ + f = self.open('rb') + try: + m = md5() + while True: + d = f.read(8192) + if not d: + break + m.update(d) + finally: + f.close() + return m.digest() + + # --- Methods for querying the filesystem. + + exists = os.path.exists + isdir = os.path.isdir + isfile = os.path.isfile + islink = os.path.islink + ismount = os.path.ismount + + if hasattr(os.path, 'samefile'): + samefile = os.path.samefile + + getatime = os.path.getatime + atime = property( + getatime, None, None, + """ Last access time of the file. """) + + getmtime = os.path.getmtime + mtime = property( + getmtime, None, None, + """ Last-modified time of the file. """) + + if hasattr(os.path, 'getctime'): + getctime = os.path.getctime + ctime = property( + getctime, None, None, + """ Creation time of the file. """) + + getsize = os.path.getsize + size = property( + getsize, None, None, + """ Size of the file, in bytes. """) + + if hasattr(os, 'access'): + def access(self, mode): + """ Return true if current user has access to this path. + + mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK + """ + return os.access(self, mode) + + def stat(self): + """ Perform a stat() system call on this path. """ + return os.stat(self) + + def lstat(self): + """ Like path.stat(), but do not follow symbolic links. """ + return os.lstat(self) + + def get_owner(self): + r""" Return the name of the owner of this file or directory. + + This follows symbolic links. + + On Windows, this returns a name of the form ur'DOMAIN\User Name'. + On Windows, a group can own a file or directory. + """ + if os.name == 'nt': + if win32security is None: + raise Exception("path.owner requires win32all to be installed") + desc = win32security.GetFileSecurity( + self, win32security.OWNER_SECURITY_INFORMATION) + sid = desc.GetSecurityDescriptorOwner() + account, domain, typecode = win32security.LookupAccountSid(None, sid) + return domain + u'\\' + account + else: + if pwd is None: + raise NotImplementedError("path.owner is not implemented on this platform.") + st = self.stat() + return pwd.getpwuid(st.st_uid).pw_name + + owner = property( + get_owner, None, None, + """ Name of the owner of this file or directory. """) + + if hasattr(os, 'statvfs'): + def statvfs(self): + """ Perform a statvfs() system call on this path. """ + return os.statvfs(self) + + if hasattr(os, 'pathconf'): + def pathconf(self, name): + return os.pathconf(self, name) + + + # --- Modifying operations on files and directories + + def utime(self, times): + """ Set the access and modified times of this file. 
""" + os.utime(self, times) + + def chmod(self, mode): + os.chmod(self, mode) + + if hasattr(os, 'chown'): + def chown(self, uid, gid): + os.chown(self, uid, gid) + + def rename(self, new): + dry("rename %s to %s" % (self, new), os.rename, self, new) + + def renames(self, new): + dry("renames %s to %s" % (self, new), os.renames, self, new) + + + # --- Create/delete operations on directories + + def mkdir(self, mode=0777): + if not self.exists(): + dry("mkdir %s (mode %s)" % (self, mode), os.mkdir, self, mode) + + def makedirs(self, mode=0777): + if not self.exists(): + dry("makedirs %s (mode %s)" % (self, mode), os.makedirs, self, mode) + + def rmdir(self): + if self.exists(): + dry("rmdir %s" % (self), os.rmdir, self) + + def removedirs(self): + if self.exists(): + dry("removedirs %s" % (self), os.removedirs, self) + + + # --- Modifying operations on files + + def touch(self): + """ Set the access/modified times of this file to the current time. + Create the file if it does not exist. + """ + def do_touch(): + fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666) + os.close(fd) + os.utime(self, None) + dry("touch %s" % (self), do_touch) + + def remove(self): + if self.exists(): + dry("remove %s" % (self), os.remove, self) + + def unlink(self): + if self.exists(): + dry("unlink %s" % (self), os.unlink, self) + + + # --- Links + # TODO: mark these up for dry run XXX + + if hasattr(os, 'link'): + def link(self, newpath): + """ Create a hard link at 'newpath', pointing to this file. """ + os.link(self, newpath) + + if hasattr(os, 'symlink'): + def symlink(self, newlink): + """ Create a symbolic link at 'newlink', pointing here. """ + os.symlink(self, newlink) + + if hasattr(os, 'readlink'): + def readlink(self): + """ Return the path to which this symbolic link points. + + The result may be an absolute or a relative path. + """ + return self.__class__(os.readlink(self)) + + def readlinkabs(self): + """ Return the path to which this symbolic link points. + + The result is always an absolute path. + """ + p = self.readlink() + if p.isabs(): + return p + else: + return (self.parent / p).abspath() + + + # --- High-level functions from shutil + + def copy(self, dst): + dry("copy %s %s" % (self, dst), shutil.copy, self, dst) + + def copytree(self, dst, *args, **kw): + dry("copytree %s %s" % (self, dst), shutil.copytree, + self, dst, *args, **kw) + + if hasattr(shutil, 'move'): + def move(self, dst): + dry("move %s %s" % (self, dst), shutil.move, self, dst) + + def rmtree(self, *args, **kw): + if self.exists(): + dry("rmtree %s %s %s" % (self, args, kw), shutil.rmtree, + self, *args, **kw) + + + # --- Special stuff from os + + if hasattr(os, 'chroot'): + def chroot(self): + os.chroot(self) + + if hasattr(os, 'startfile'): + def startfile(self): + os.startfile(self) + +from paver.easy import dry diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/path25.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/path25.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,35 @@ +"""Python 2.5+ path module that adds with-statement features.""" +from __future__ import with_statement + +import os +from contextlib import contextmanager + +from paver.path import path +from paver import tasks + +__all__ = ['path', 'pushd'] + +@contextmanager +def pushd(dir): + '''A context manager (Python 2.5+ only) for stepping into a + directory and automatically coming back to the previous one. + The original directory is returned. 
Usage is like this:: + + from __future__ import with_statement + # the above line is only needed for Python 2.5 + + from paver.easy import * + + @task + def my_task(): + with pushd('new/directory') as old_dir: + ...do stuff... + ''' + old_dir = os.getcwd() + tasks.environment.info('cd %s' % dir) + os.chdir(dir) + try: + yield old_dir + tasks.environment.info('cd %s' % old_dir) + finally: + os.chdir(old_dir) diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/release.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/release.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,19 @@ +"""Release metadata for Paver.""" + +from paver.options import Bunch +from paver.tasks import VERSION + +setup_meta=Bunch( + name='Paver', + version=VERSION, + description='Easy build, distribution and deployment scripting', + long_description="""Paver is a Python-based build/distribution/deployment scripting tool along the +lines of Make or Rake. What makes Paver unique is its integration with +commonly used Python libraries. Common tasks that were easy before remain +easy. More importantly, dealing with *your* applications specific needs and +requirements is also easy.""", + author='Kevin Dangoor', + author_email='dangoor+paver@gmail.com', + url='http://www.blueskyonmars.com/projects/paver/', + packages=['paver', 'paver.cog'] +) diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/setuputils.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/setuputils.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,245 @@ +"""Integrates distutils/setuptools with Paver.""" + +import re +import os +import sys +import distutils +from fnmatch import fnmatchcase +from distutils.util import convert_path +from distutils import log +try: + from setuptools import dist +except ImportError: + from distutils import dist +from distutils.errors import DistutilsModuleError +_Distribution = dist.Distribution + +from distutils import debug +# debug.DEBUG = True + +from paver.options import Bunch + +try: + import setuptools + import pkg_resources + has_setuptools = True +except ImportError: + has_setuptools = False + +# our commands can have '.' in them, so we'll monkeypatch this +# expression +dist.command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_\.]*)$') + +from paver import tasks + +__ALL__ = ['find_package_data'] + +# find_package_data is an Ian Bicking creation. + +# Provided as an attribute, so you can append to these instead +# of replicating them: +standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*') +standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', + './dist', 'EGG-INFO', '*.egg-info') + +def find_package_data( + where='.', package='', + exclude=standard_exclude, + exclude_directories=standard_exclude_directories, + only_in_packages=True, + show_ignored=False): + """ + Return a dictionary suitable for use in ``package_data`` + in a distutils ``setup.py`` file. + + The dictionary looks like:: + + {'package': [files]} + + Where ``files`` is a list of all the files in that package that + don't match anything in ``exclude``. + + If ``only_in_packages`` is true, then top-level directories that + are not packages won't be included (but directories under packages + will). + + Directories matching any pattern in ``exclude_directories`` will + be ignored; by default directories with leading ``.``, ``CVS``, + and ``_darcs`` will be ignored. 
+ + If ``show_ignored`` is true, then all the files that aren't + included in package data are shown on stderr (for debugging + purposes). + + Note patterns use wildcards, or can be exact paths (including + leading ``./``), and all searching is case-insensitive. + + This function is by Ian Bicking. + """ + + out = {} + stack = [(convert_path(where), '', package, only_in_packages)] + while stack: + where, prefix, package, only_in_packages = stack.pop(0) + for name in os.listdir(where): + fn = os.path.join(where, name) + if os.path.isdir(fn): + bad_name = False + for pattern in exclude_directories: + if (fnmatchcase(name, pattern) + or fn.lower() == pattern.lower()): + bad_name = True + if show_ignored: + print >> sys.stderr, ( + "Directory %s ignored by pattern %s" + % (fn, pattern)) + break + if bad_name: + continue + if os.path.isfile(os.path.join(fn, '__init__.py')): + if not package: + new_package = name + else: + new_package = package + '.' + name + stack.append((fn, '', new_package, False)) + else: + stack.append((fn, prefix + name + '/', package, only_in_packages)) + elif package or not only_in_packages: + # is a file + bad_name = False + for pattern in exclude: + if (fnmatchcase(name, pattern) + or fn.lower() == pattern.lower()): + bad_name = True + if show_ignored: + print >> sys.stderr, ( + "File %s ignored by pattern %s" + % (fn, pattern)) + break + if bad_name: + continue + out.setdefault(package, []).append(prefix+name) + return out + +class DistutilsTask(tasks.Task): + def __init__(self, distribution, command_name, command_class): + name_sections = str(command_class).split(".") + if name_sections[-2] == name_sections[-1]: + del name_sections[-2] + self.name = ".".join(name_sections) + self.__name__ = self.name + self.distribution = distribution + self.command_name = command_name + self.shortname = _get_shortname(command_name) + self.command_class = command_class + self.option_names = set() + self.needs = [] + self.user_options = command_class.user_options + # Parse distutils config files. 
+ distribution.parse_config_files() + + def __call__(self, *args, **kw): + options = tasks.environment.options.get(self.shortname, {}) + opt_dict = self.distribution.get_option_dict(self.command_name) + for (name, value) in options.items(): + opt_dict[name.replace('-', '_')] = ("command line", value) + self.distribution.run_command(self.command_name) + + @property + def description(self): + return self.command_class.description + +def _get_shortname(taskname): + dotindex = taskname.rfind(".") + if dotindex > -1: + command_name = taskname[dotindex+1:] + else: + command_name = taskname + return command_name + +class DistutilsTaskFinder(object): + def get_task(self, taskname): + dist = _get_distribution() + command_name = _get_shortname(taskname) + try: + command_class = dist.get_command_class(command_name) + except DistutilsModuleError: + return None + return DistutilsTask(dist, command_name, command_class) + + def get_tasks(self): + dist = _get_distribution() + if has_setuptools: + for ep in pkg_resources.iter_entry_points('distutils.commands'): + try: + cmdclass = ep.load(False) # don't require extras, we're not running + dist.cmdclass[ep.name] = cmdclass + except: + # on the Mac, at least, installing from the tarball + # via zc.buildout fails due to a problem in the + # py2app command + tasks.environment.info("Could not load entry point: %s", ep) + dist.get_command_list() + return set(DistutilsTask(dist, key, value) + for key, value in dist.cmdclass.items()) + +def _get_distribution(): + try: + return tasks.environment.distribution + except AttributeError: + dist = _Distribution(attrs=tasks.environment.options.get('setup', {})) + tasks.environment.distribution = dist + dist.script_name = tasks.environment.pavement_file + return dist + +def install_distutils_tasks(): + """Makes distutils and setuptools commands available as Paver tasks.""" + env = tasks.environment + if not hasattr(env, "_distutils_tasks_installed"): + env.task_finders.append(DistutilsTaskFinder()) + env._distutils_tasks_installed = True + +def setup(**kw): + """Updates options.setup with the keyword arguments provided, + and installs the distutils tasks for this pavement. You can + use paver.setuputils.setup as a direct replacement for + the distutils.core.setup or setuptools.setup in a traditional + setup.py.""" + install_distutils_tasks() + setup_section = tasks.environment.options.setdefault("setup", Bunch()) + setup_section.update(kw) + +def _error(message, *args): + """Displays an error message to the user.""" + tasks.environment.error(message, *args) + +def _info(message, *args): + """Displays a message to the user. 
If the quiet option is specified, the + message will not be displayed.""" + tasks.environment.info(message, *args) + +def _debug(message, *args): + """Displays a message to the user, but only if the verbose flag is + set.""" + tasks.environment.debug(message, *args) + +def _base_log(level, message, *args): + """Displays a message at the given log level""" + tasks.environment._log(level, message, args) + +# monkeypatch the distutils logging to go through Paver's logging +log.log = _base_log +log.debug = _debug +log.info = _info +log.warn = _error +log.error = _error +log.fatal = _error + + +if has_setuptools: + __ALL__.extend(["find_packages"]) + + from setuptools import find_packages +else: + import distutils.core + diff -r 74b7ad049542 -r 3c2151124cee python-modules/paver/tasks.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/python-modules/paver/tasks.py Mon Jun 29 10:19:33 2009 -0700 @@ -0,0 +1,618 @@ +import sys +import os +import optparse +import types +import inspect +import itertools +import traceback + +VERSION = "1.0.1" + +class PavementError(Exception): + """Exception that represents a problem in the pavement.py file + rather than the process of running a build.""" + pass + +class BuildFailure(Exception): + """Represents a problem with some part of the build's execution.""" + pass + + +class Environment(object): + _task_in_progress = None + _task_output = None + _all_tasks = None + _dry_run = False + verbose = False + interactive = False + quiet = False + _file = "pavement.py" + + def __init__(self, pavement=None): + self.pavement = pavement + self.task_finders = [] + try: + # for the time being, at least, tasks.py can be used on its + # own! + from paver import options + self.options = options.Namespace() + self.options.dry_run = False + self.options.pavement_file = self.pavement_file + except ImportError: + pass + + def info(self, message, *args): + self._log(2, message, args) + + def debug(self, message, *args): + self._log(1, message, args) + + def error(self, message, *args): + self._log(3, message, args) + + def _log(self, level, message, args): + output = message % args + if self._task_output is not None: + self._task_output.append(output) + if level > 2 or (level > 1 and not self.quiet) or \ + self.verbose: + self._print(output) + + def _print(self, output): + print output + + def _exit(self, code): + sys.exit(1) + + def _set_dry_run(self, dr): + self._dry_run = dr + try: + self.options.dry_run = dr + except AttributeError: + pass + + def _get_dry_run(self): + return self._dry_run + + dry_run = property(_get_dry_run, _set_dry_run) + + def _set_pavement_file(self, pavement_file): + self._file = pavement_file + try: + self.options.pavement_file = pavement_file + except AttributeError: + pass + + def _get_pavement_file(self): + return self._file + + pavement_file = property(_get_pavement_file, _set_pavement_file) + + file = property(fset=_set_pavement_file) + + def get_task(self, taskname): + task = getattr(self.pavement, taskname, None) + + # delegate to task finders next + if not task: + for finder in self.task_finders: + task = finder.get_task(taskname) + if task: + break + + # try to look up by full name + if not task: + task = _import_task(taskname) + + # if there's nothing by full name, look up by + # short name + if not task: + all_tasks = self.get_tasks() + matches = [t for t in all_tasks + if t.shortname == taskname] + if len(matches) > 1: + matched_names = [t.name for t in matches] + raise BuildFailure("Ambiguous task name %s (%s)" % + (taskname, 
matched_names)) + elif matches: + task = matches[0] + return task + + def call_task(self, task_name): + task = self.get_task(task_name) + task() + + def _run_task(self, task_name, needs, func): + (funcargs, varargs, varkw, defaults) = inspect.getargspec(func) + kw = dict() + for i in xrange(0, len(funcargs)): + arg = funcargs[i] + if arg == 'env': + kw['env'] = self + # Keyword arguments do now need to be in the environment + elif (defaults is not None and + (i - (len(funcargs) - len(defaults))) >= 0): + pass + else: + try: + kw[arg] = getattr(self, arg) + except AttributeError: + raise PavementError("Task %s requires an argument (%s) that is " + "not present in the environment" % (task_name, arg)) + + if not self._task_in_progress: + self._task_in_progress = task_name + self._task_output = [] + running_top_level = True + else: + running_top_level = False + def do_task(): + self.info("---> " + task_name) + for req in needs: + task = self.get_task(req) + if not task: + raise PavementError("Requirement %s for task %s not found" % + (req, task_name)) + if not isinstance(task, Task): + raise PavementError("Requirement %s for task %s is not a Task" + % (req, task_name)) + if not task.called: + task() + return func(**kw) + if running_top_level: + try: + return do_task() + except Exception, e: + self._print(""" + +Captured Task Output: +--------------------- +""") + self._print("\n".join(self._task_output)) + if isinstance(e, BuildFailure): + self._print("\nBuild failed running %s: %s" % + (self._task_in_progress, e)) + else: + self._print(traceback.format_exc()) + self._task_in_progress = None + self._task_output = None + self._exit(1) + else: + return do_task() + + def get_tasks(self): + if self._all_tasks: + return self._all_tasks + result = set() + modules = set() + def scan_module(module): + modules.add(module) + for name in dir(module): + item = getattr(module, name, None) + if isinstance(item, Task): + result.add(item) + if isinstance(item, types.ModuleType) and item not in modules: + scan_module(item) + scan_module(self.pavement) + for finder in self.task_finders: + result.update(finder.get_tasks()) + self._all_tasks = result + return result + +environment_stack = [] +environment = Environment() + +def _import_task(taskname): + """Looks up a dotted task name and imports the module as necessary + to get at the task.""" + parts = taskname.split('.') + if len(parts) < 2: + return None + func_name = parts[-1] + full_mod_name = ".".join(parts[:-1]) + mod_name = parts[-2] + try: + module = __import__(full_mod_name, globals(), locals(), [mod_name]) + except ImportError: + return None + return getattr(module, func_name, None) + +class Task(object): + called = False + consume_args = False + no_auto = False + + __doc__ = "" + + def __init__(self, func): + self.func = func + self.needs = [] + self.__name__ = func.__name__ + self.shortname = func.__name__ + self.name = "%s.%s" % (func.__module__, func.__name__) + self.option_names = set() + self.user_options = [] + try: + self.__doc__ = func.__doc__ + except AttributeError: + pass + + def __call__(self, *args, **kw): + retval = environment._run_task(self.name, self.needs, self.func) + self.called = True + return retval + + def __repr__(self): + return "Task: " + self.__name__ + + @property + def parser(self): + options = self.user_options + parser = optparse.OptionParser(add_help_option=False, + usage="%%prog %s [options]" % (self.name)) + parser.disable_interspersed_args() + parser.add_option('-h', '--help', action="store_true", + help="display this 
help information") + + needs_tasks = [(environment.get_task(task), task) for task in self.needs] + for task, task_name in itertools.chain([(self, self.name)], needs_tasks): + if not task: + raise PavementError("Task %s needed by %s does not exist" + % (task_name, self)) + for option in task.user_options: + try: + longname = option[0] + if longname.endswith('='): + action = "store" + longname = longname[:-1] + else: + action = "store_true" + + environment.debug("Task %s: adding option %s (%s)" % + (self.name, longname, option[1])) + try: + if option[1] is None: + parser.add_option("--" + longname, action=action, + dest=longname.replace('-', '_'), + help=option[2]) + else: + parser.add_option("-" + option[1], + "--" + longname, action=action, + dest=longname.replace('-', '_'), + help=option[2]) + except optparse.OptionConflictError: + raise PavementError("""In setting command options for %r, +option %s for %r is already in use +by another task in the dependency chain.""" % (self, option, task)) + self.option_names.add((task.shortname, longname)) + except IndexError: + raise PavementError("Invalid option format provided for %r: %s" + % (self, option)) + return parser + + def display_help(self, parser=None): + if not parser: + parser = self.parser + + name = self.name + print "\n%s" % name + print "-" * (len(name)) + parser.print_help() + print + print self.__doc__ + print + + def parse_args(self, args): + import paver.options + environment.debug("Task %s: Parsing args %s" % (self.name, args)) + optholder = environment.options.setdefault(self.shortname, + paver.options.Bunch()) + parser = self.parser + options, args = parser.parse_args(args) + if options.help: + self.display_help(parser) + sys.exit(0) + + for task_name, option_name in self.option_names: + option_name = option_name.replace('-', '_') + try: + optholder = environment.options[task_name] + except KeyError: + optholder = paver.options.Bunch() + environment.options[task_name] = optholder + value = getattr(options, option_name) + if value is not None: + optholder[option_name] = getattr(options, option_name) + return args + + @property + def description(self): + doc = self.__doc__ + if doc: + period = doc.find(".") + if period > -1: + doc = doc[0:period] + else: + doc = "" + return doc + + +def task(func): + """Specifies that this function is a task. + + Note that this decorator does not actually replace the function object. + It just keeps track of the task and sets an is_task flag on the + function object.""" + if isinstance(func, Task): + return func + task = Task(func) + return task + +def needs(*args): + """Specifies tasks upon which this task depends. + + req can be a string or a list of strings with the names + of the tasks. You can call this decorator multiple times + and the various requirements are added on. You can also + call with the requirements as a list of arguments. + + The requirements are called in the order presented in the + list.""" + def entangle(func): + req = args + func = task(func) + needs_list = func.needs + if len(req) == 1: + req = req[0] + if isinstance(req, basestring): + needs_list.append(req) + elif isinstance(req, (list, tuple)): + needs_list.extend(req) + else: + raise PavementError("'needs' decorator requires a list or string " + "but got %s" % req) + return func + return entangle + +def cmdopts(options): + """Sets the command line options that can be set for this task. + This uses the same format as the distutils command line option + parser. 
It's a list of tuples, each with three elements: + long option name, short option, description. + + If the long option name ends with '=', that means that the + option takes a value. Otherwise the option is just boolean. + All of the options will be stored in the options dict with + the name of the task. Each value that gets stored in that + dict will be stored with a key that is based on the long option + name (the only difference is that - is replaced by _).""" + def entangle(func): + func = task(func) + func.user_options = options + return func + return entangle + +def consume_args(func): + """Any command line arguments that appear after this task on the + command line will be placed in options.args.""" + func = task(func) + func.consume_args = True + return func + +def no_auto(func): + """Specify that this task does not depend on the auto task, + and don't run the auto task just for this one.""" + func = task(func) + func.no_auto = True + return func + +def _preparse(args): + task = None + taskname = None + while args: + arg = args.pop(0) + if '=' in arg: + key, value = arg.split("=") + try: + environment.options.setdotted(key, value) + except AttributeError: + raise BuildFailure("""This appears to be a standalone Paver +tasks.py, so the build environment does not support options. The command +line (%s) attempts to set an option.""" % (args)) + elif arg.startswith('-'): + args.insert(0, arg) + break + else: + taskname = arg + task = environment.get_task(taskname) + if task is None: + raise BuildFailure("Unknown task: %s" % taskname) + break + return task, taskname, args + +def _parse_global_options(args): + # this is where global options should be dealt with + parser = optparse.OptionParser(usage= + """Usage: %prog [global options] taskname [task options] """ + """[taskname [taskoptions]]""", version="Paver %s" % (VERSION), + add_help_option=False) + + environment.help_function = parser.print_help + + parser.add_option('-n', '--dry-run', action='store_true', + help="don't actually do anything") + parser.add_option('-v', "--verbose", action="store_true", + help="display all logging output") + parser.add_option('-q', '--quiet', action="store_true", + help="display only errors") + parser.add_option("-i", "--interactive", action="store_true", + help="enable prompting") + parser.add_option("-f", "--file", metavar="FILE", + help="read tasks from FILE [%default]") + parser.add_option('-h', "--help", action="store_true", + help="display this help information") + parser.set_defaults(file=environment.pavement_file) + + parser.disable_interspersed_args() + options, args = parser.parse_args(args) + if options.help: + args.insert(0, "help") + for key, value in vars(options).items(): + setattr(environment, key, value) + + return args + +def _parse_command_line(args): + task, taskname, args = _preparse(args) + + if not task: + args = _parse_global_options(args) + if not args: + return None, [] + + taskname = args.pop(0) + task = environment.get_task(taskname) + + if not task: + raise BuildFailure("Unknown task: %s" % taskname) + + if not isinstance(task, Task): + raise BuildFailure("%s is not a Task" % taskname) + + if task.consume_args: + try: + environment.options.args = args + except AttributeError: + pass + environment.args = args + args = [] + else: + args = task.parse_args(args) + + return task, args + +def _cmp_task_names(a, b): + a = a.name + b = b.name + a_in_pavement = a.startswith("pavement.") + b_in_pavement = b.startswith("pavement.") + if a_in_pavement and not b_in_pavement: + return 1 
+ if b_in_pavement and not a_in_pavement: + return -1 + return cmp(a, b) + +def _group_by_module(items): + groups = [] + current_group_name = None + current_group = None + maxlen = 5 + for item in items: + name = item.name + dotpos = name.rfind(".") + group_name = name[:dotpos] + maxlen = max(len(item.shortname), maxlen) + if current_group_name != group_name: + current_group = [] + current_group_name = group_name + groups.append([group_name, current_group]) + current_group.append(item) + return maxlen, groups + +@task +@no_auto +@consume_args +def help(args, help_function): + """This help display.""" + if args: + task_name = args[0] + task = environment.get_task(task_name) + if not task: + print "Task not found: %s" % (task_name) + return + + task.display_help() + return + + help_function() + + task_list = environment.get_tasks() + task_list = sorted(task_list, cmp=_cmp_task_names) + maxlen, task_list = _group_by_module(task_list) + fmt = " %-" + str(maxlen) + "s - %s" + for group_name, group in task_list: + print "\nTasks from %s:" % (group_name) + for task in group: + print(fmt % (task.shortname, task.description)) + +def _process_commands(args, auto_pending=False): + first_loop = True + while True: + task, args = _parse_command_line(args) + if auto_pending: + if not task or not task.no_auto: + environment.call_task('auto') + auto_pending=False + if task is None: + if first_loop: + task = environment.get_task('default') + if not task: + break + else: + break + task() + first_loop = False + +def call_pavement(new_pavement, args): + if isinstance(args, basestring): + args = args.split() + global environment + environment_stack.append(environment) + environment = Environment() + cwd = os.getcwd() + dirname, basename = os.path.split(new_pavement) + environment.pavement_file = basename + try: + if dirname: + os.chdir(dirname) + _launch_pavement(args) + finally: + os.chdir(cwd) + environment = environment_stack.pop() + +def _launch_pavement(args): + mod = types.ModuleType("pavement") + environment.pavement = mod + + if not os.path.exists(environment.pavement_file): + environment.pavement_file = None + exec "from paver.easy import *\n" in mod.__dict__ + _process_commands(args) + return + + mod.__file__ = environment.pavement_file + try: + execfile(environment.pavement_file, mod.__dict__) + auto_task = getattr(mod, 'auto', None) + auto_pending = isinstance(auto_task, Task) + _process_commands(args, auto_pending=auto_pending) + except PavementError, e: + print "\n\n*** Problem with pavement:\n%s\n%s\n\n" % ( + os.path.abspath(environment.pavement_file), e) + +def main(args=None): + global environment + if args is None: + if len(sys.argv) > 1: + args = sys.argv[1:] + else: + args = [] + environment = Environment() + + # need to parse args to recover pavement-file to read before executing + try: + args = _parse_global_options(args) + _launch_pavement(args) + except BuildFailure, e: + environment.error("Build failed: %s", e) + sys.exit(1)
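
Note: the decorators defined above in paver/tasks.py (task, needs, cmdopts,
consume_args) are the API that pavement files are written against. Below is a
minimal, illustrative sketch of a task that uses them; the task names, option
name, and messages are hypothetical and are not part of this changeset, and
paver.easy is assumed to re-export the decorators as it does for manage.py:

    from paver.easy import *  # assumed to re-export task, needs, cmdopts

    @task
    def prepare():
        """Hypothetical prerequisite task."""
        print "Preparing."

    @task
    @needs('prepare')  # prepare() is run before this task
    @cmdopts([("message=", "m", "Message to display")])
    def example(options):
        """Hypothetical task showing how parsed options are read."""
        # Task.parse_args() stores each parsed value under the task's
        # short name, so the value lives in options['example'].
        msg = options['example'].get('message')
        if msg:
            print "Message: %s" % msg
        else:
            print "No message given."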