#!/bin/env python

# CVSHistory -- A cvsweb/viewcvs-integrating CVS history
# browsing script/web frontend/thingie.
# Jamie Turner

##### USER EDITABLE SECTION #####

CONFIGFILE = "/home/rool/devel/python/cvshistory/cvshistory.conf"

# END OF USER EDITABLE SECTION!

##### LICENSE #####

# Modified BSD
# ------------
#
# All of the documentation and software included in this software
# is copyrighted by Jamie Turner
#
# Copyright 2004 Jamie Turner.  All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
#    derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# ViewCVS look & feel modeled after the ViewCVS project
#   viewcvs.sourceforge.net
# CVSweb look & feel modeled after the FreeBSD CVSweb project
#   www.freebsd.org/projects/cvsweb.html

##### CODE #####

# Integration mode constants.
INT_NONE    = 0  # don't edit
INT_VIEWCVS = 1  # don't edit
INT_CVSWEB  = 2  # don't edit

# Predefined time formats.
USTIME    = "%m-%d-%Y %H:%M"
WORLDTIME = "%d-%m-%Y %H:%M"
ISOTIME   = "%Y-%m-%d %H:%M"

# Performance mode constants.
MODE_FAST = 1
MODE_SLOW = 2

# Load configuration.
execfile(CONFIGFILE)

# let's handle the options above

# first, formatting
# we default to ViewCVS layout, with no integration.  it's prettier!
if INTEGRATION == INT_NONE or INTEGRATION == INT_VIEWCVS:
    _SORTEDCOL = "88ff88"
    _REGCOL    = "cccccc"
    _ROWS      = ["ffffff", "ccccee"]
    _TABLETOP  = ''
else:
    _SORTEDCOL = "ffcc66"
    _REGCOL    = "ffffcc"
    _ROWS      = ["ffffff", "ffffff"]
    _TABLETOP  = ''
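# The configuration file pulled in by execfile() above is plain Python and is
# expected to define the names used throughout this script (HISTORY,
# INTEGRATION, SCRIPTPATH, TIMEFORMAT, PERPAGE, PERFORMANCE, LIMITDAYS,
# SITE_ADMIN, AUTHOR_EMAIL_DOMAIN, PUBLIC_SERVER).  A minimal sketch -- the
# values below are illustrative assumptions, not taken from the original
# distribution:
#
#   HISTORY = { "" : "/var/cvs/CVSROOT/history" }  # cvsroot name -> history file
#   INTEGRATION = INT_VIEWCVS        # or INT_NONE / INT_CVSWEB
#   SCRIPTPATH  = "/cgi-bin/viewcvs.cgi"
#   TIMEFORMAT  = ISOTIME            # or USTIME / WORLDTIME
#   PERPAGE     = 50                 # rows shown per page
#   PERFORMANCE = MODE_FAST          # MODE_SLOW enables sorting, MODE_FAST skips it
#   LIMITDAYS   = 0                  # 0 = no age cutoff on history entries
#   SITE_ADMIN  = "admin@example.org"        # shown in the RSS channel header
#   AUTHOR_EMAIL_DOMAIN = ""         # non-empty adds author addresses to RSS items
#   PUBLIC_SERVER = 0                # non-zero adds a feed "Subscribe" link
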
_SCRIPT = SCRIPTPATH
_LOGO   = " "
_ICON   = '[DIR]'

if INTEGRATION == INT_VIEWCVS:
    _SCRIPT = SCRIPTPATH
    _LOGO   = '(logo)'
    _ICON   = '(dir)'
elif INTEGRATION == INT_NONE:
    _LOGO = ''
    _ICON = ''

# dependencies
import time
import os
import urllib
import urlparse
import cgi
import re
import xml.sax.saxutils

_VERSION = "2.2"

# basic fields:
# Operation Type(Operation Date)|username||directory|revision|fn
_OPTYPES = {
    "O" : "Checkout",
    "F" : "Release",
    "T" : "RTag",
    "W" : "Delete on update",
    "U" : "Update",
    "P" : "Update by patch",
    "G" : "Merge on update",
    "C" : "Conflict on update",
    "M" : "Commit",
    "A" : "Addition",
    "R" : "Removal",
    "E" : "Export",
}

def op2text(op):
    return _OPTYPES.get(op, "Unknown (%s)" % op)

_SELF_URL = os.environ['SCRIPT_NAME']

class ReverseReadline:
    def __init__(self,fd,BUFSIZ=262144):
        self.fd = fd
        self.ind = 0
        self.BUFSIZ = BUFSIZ
        self.fd.seek(0,2)
        self.siz = self.fd.tell()
        self.buff = ""
        self.ind = 0
        self.scount = -1
        self.eof = 0
        self.first = 1

    def readline(self):
        # find the start index
        nind = self.buff.rfind("\n",0,self.ind-1)
        if nind != -1 and self.ind != 0:
            ret = self.buff[nind+1:self.ind+1]
            self.ind = nind
            return ret

        # oops.. outta newlines
        # keep the existing data
        old = self.buff[:self.ind+1]

        # can't seek backward anymore?
        if self.eof:
            self.buff = ""
            return old

        # seek to the right location
        sksiz = self.BUFSIZ * self.scount
        if sksiz * -1 > self.siz:
            sksiz = self.siz * -1
            self.eof = 1
            rd = self.siz - ( ( (self.scount * -1) - 1) * self.BUFSIZ)
        else:
            rd = self.BUFSIZ

        self.fd.seek(sksiz,2)
        self.buff = self.fd.read(rd)

        if self.first:
            self.ind = rd - 1
            self.first = 0
        else:
            self.ind = rd

        self.scount -= 1

        rl = self.readline()
        if rl[-1] == "\n":
            return old + rl
        else:
            return rl + old

# fast mode, for big servers.  No sorting!
def get_history_fast(conds,opts):
    reader = ReverseReadline(open(HISTORY[opts["cvsroot"]]))

    # datelimit
    lasttime = time.time()
    if LIMITDAYS:
        ltm = lasttime
        ltm -= (86400 * LIMITDAYS)
    else:
        ltm = 0

    offset = 0
    if opts.has_key("offset"):
        offset = int(opts["offset"])

    data = []
    skip = 0
    line = reader.readline()
    while len(data) < PERPAGE and line and lasttime > ltm:
        cur = line.strip().split("|",5) # maxsplit @ 5

        # we play a little game here.  we don't care about the data
        # at index two, so we'll put the time there
        lasttime = get_time(cur[0][1:])
        cur[2] = lasttime
        cur[0] = cur[0][0] # the op code

        failed = 0
        for cond in conds:
            if not cond.test(cur):
                failed = 1
                break

        if not failed and skip < offset:
            skip += 1
        elif not failed and lasttime > ltm:
            data.append(cur)

        line = reader.readline()

    return data

# this is a bit uneasy for especially large logs, but..
# we'll worry about that later.
def get_history(opts):
    # datelimit
    lasttime = time.time()
    if LIMITDAYS:
        ltm = lasttime
        ltm -= (86400 * LIMITDAYS)
    else:
        ltm = 0

    fd = open(HISTORY[opts["cvsroot"]],"r")

    data = []
    line = fd.readline()
    while line:
        cur = line.strip().split("|",5)

        # we play a little game here.  we don't care about the data
        # at index two, so we'll put the time there
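        # For illustration only, a hypothetical history record (the real file
        # is written by CVS itself) splits like this:
        #   "M4422aabb|jamie||proj/src|1.5|main.c"
        #   -> ["M4422aabb", "jamie", "", "proj/src", "1.5", "main.c"]
        # cur[0][0] is the one-letter op code, cur[0][1:] the hex timestamp,
        # and the unused field at index 2 is overwritten with the decoded time.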
        lasttime = get_time(cur[0][1:])
        cur[2] = lasttime
        cur[0] = cur[0][0] # the op code

        if lasttime > ltm:
            data.append(cur)

        line = fd.readline()

    fd.close()

    # data is:
    # op:user:time:dir:revision:file/module
    # * number of entries
    data.reverse()
    return data

_OTYPE_CHECKBOX = 1
_OTYPE_TEXT     = 2
_OTYPE_SELECT   = 3

def opt_pre(opts,type,opt,opt2=None):
    if type == _OTYPE_CHECKBOX:
        if opts.has_key(opt) and opts[opt] == "on":
            return "CHECKED"
    elif type == _OTYPE_TEXT:
        if opts.has_key(opt) and opts[opt].strip() != "":
            return "value=\"%s\"" % opts[opt]
    elif type == _OTYPE_SELECT:
        if opts.has_key(opt) and opts[opt] == opt2:
            return "SELECTED"
    return ""

def get_time(hextime):
    tm = 0
    ht_len = len(hextime)
    for x in xrange(0,ht_len):
        d = ord(hextime[x]) - 48
        if d > 9:
            d -= 39
        tm += d * (16 ** (ht_len - x - 1 ) )
    return tm

_COLUMNS = ["Date","User","Operation","Directory","File","Revision"]

def get_script_absolute_url ():
    port = ""
    if os.environ.get('HTTPS') or os.environ['SERVER_PROTOCOL'][:5] == 'HTTPS':
        url = 'https://'
        if os.environ['SERVER_PORT'] != '443':
            port = os.environ['SERVER_PORT']
    else:
        url = 'http://'
        if os.environ['SERVER_PORT'] != '80':
            port = os.environ['SERVER_PORT']
    url += os.environ['SERVER_NAME']
    if port:
        url += ':' + port
    url += _SELF_URL
    return url

def pretty_print_rss(data,options):
    cvsHistoryURL = get_script_absolute_url ()

    print 'Content-Type: text/xml; charset=iso-8859-1'
    print
    print \
''' CVSHistory %s CVS Changelog History http://blogs.law.harvard.edu/tech/rss CVSHistory %s en ''' % (cvsHistoryURL, SITE_ADMIN)

    for row in data:
        fileName = row[5]
        dirName = row[3]
        author = row[1]
        revision = row[4]
        op = op2text(row[0])

        if fileName == dirName:
            link = '%s/%s/' % (_SCRIPT, dirName)
        else:
            link = '%s/%s/%s' % (_SCRIPT, dirName, fileName)
        link = urlparse.urljoin (cvsHistoryURL, link)

        title = fileName
        description = '''%s: %s %s (%s)''' % (author, op, revision, dirName)
        milliseconds = row[2]  # actually a Unix timestamp in seconds
        date = time.strftime ('%a, %d %b %Y %H:%M:%S %Z',
                              time.localtime (milliseconds))

        print \
''' %s %s %s %s %s %s''' % (
            xml.sax.saxutils.escape (title),
            xml.sax.saxutils.escape (description),
            xml.sax.saxutils.escape (link),
            op,
            date,
            author + '/' + op + '/' + fileName + '/' + str (milliseconds) )
        if AUTHOR_EMAIL_DOMAIN:
            print " %s@%s (%s)" % (author, AUTHOR_EMAIL_DOMAIN, author)
        print ' '

    print \
''' '''

def pretty_print(data,options):
    if options.has_key("rss"):
        pretty_print_rss(data,options)
        return

    # set up paging
    offset = 0
    if options.has_key("offset"):
        try:
            offset = int(options["offset"])
        except:
            offset = 0

    pstr = ""
    nstr = ""

    if PERFORMANCE == MODE_SLOW:
        sortby = "Date"
        if options.has_key("sortby"):
            sortby = options["sortby"]

        nend = PERPAGE
        if len(data) - offset - PERPAGE < PERPAGE:
            nend = len(data) - offset - PERPAGE

        if offset - PERPAGE >= 0:
            pstr = '' % ( _SELF_URL,
                          getstring(options,["offset","%d" % (offset - PERPAGE)]),
                          PERPAGE)
        if offset + PERPAGE < len(data):
            nstr = '' % ( _SELF_URL,
                          getstring(options,["offset","%d" % (offset + PERPAGE)]),
                          nend)

        cend = offset + PERPAGE
        if cend > len(data):
            cend = len(data)
        ofstr = " of %s" % len(data)
        srange = offset
        erange = cend

    elif PERFORMANCE == MODE_FAST:
        if offset >= PERPAGE:
            pstr = '' % ( _SELF_URL,
                          getstring(options,["offset","%d" % (offset - PERPAGE)]))
        nstr = '' % ( _SELF_URL,
                      getstring(options,["offset","%d" % (offset + PERPAGE)]))

        cend = offset + PERPAGE
        if cend - offset > len(data):
            cend = len(data) + offset
        ofstr = ""
        srange = 0
        erange = len(data)

    rssURL = get_script_absolute_url ()
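    # get_script_absolute_url() rebuilds the script's own URL from the CGI
    # environment; with the current query string and "rss=1" appended below it
    # becomes the feed link, e.g. (hypothetical host and path):
    #   https://cvs.example.org/cgi-bin/cvshistory.cgi?cvsroot=main&rss=1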
    rssURL += '?'
    form = cgi.FieldStorage()
    for paramName in form.keys():
        paramValue = form[paramName].value
        if type(paramValue) == type([]):
            paramValue = paramValue[0]
        rssURL += paramName + '=' + urllib.quote (paramValue) + '&'
    rssURL += 'rss=1'

    print \
'''Content-Type: text/html\r
\r
CVSHistory
( Previous %s )( Next %s )( Newer Entries )( Older Entries )

CVSHistory

%s

User:
Regular expression?  
  Revision:
File:
Regular expression?  
  Date:        format: %s or %s
Directory:
Regular expression?  
Include subdirectories?  
 
Operations:      Addition      Checkout      Commit      Conflict on update 
Delete on update      Merge on update      Release      Removal      Rtag      Update      Update by patch     
(Reset/View All)
%s

%s%s
Showing records %s-%s%s
%s
''' % (cgi.escape(rssURL),
       _LOGO,
       _SELF_URL,
       opt_pre(options,_OTYPE_TEXT,"usearch"),
       opt_pre(options,_OTYPE_CHECKBOX,"usearchre"),
       opt_pre(options,_OTYPE_SELECT,"revsel1","na"),
       opt_pre(options,_OTYPE_SELECT,"revsel1","eq"),
       opt_pre(options,_OTYPE_SELECT,"revsel1","gt"),
       opt_pre(options,_OTYPE_SELECT,"revsel1","lt"),
       opt_pre(options,_OTYPE_TEXT,"revval1"),
       opt_pre(options,_OTYPE_SELECT,"revsel2","na"),
       opt_pre(options,_OTYPE_SELECT,"revsel2","eq"),
       opt_pre(options,_OTYPE_SELECT,"revsel2","gt"),
       opt_pre(options,_OTYPE_SELECT,"revsel2","lt"),
       opt_pre(options,_OTYPE_TEXT,"revval2"),
       opt_pre(options,_OTYPE_TEXT,"fsearch"),
       opt_pre(options,_OTYPE_CHECKBOX,"fsearchre"),
       #date break...
       time.strftime(TIMEFORMAT,time.localtime(time.time())),
       time.strftime(TIMEFORMAT[:-6],time.localtime(time.time())),
       opt_pre(options,_OTYPE_SELECT,"datesel1","na"),
       opt_pre(options,_OTYPE_SELECT,"datesel1","eq"),
       opt_pre(options,_OTYPE_SELECT,"datesel1","gt"),
       opt_pre(options,_OTYPE_SELECT,"datesel1","lt"),
       opt_pre(options,_OTYPE_TEXT,"dateval1"),
       opt_pre(options,_OTYPE_TEXT,"dsearch"),
       opt_pre(options,_OTYPE_CHECKBOX,"dsearchre"),
       opt_pre(options,_OTYPE_CHECKBOX,"dsearchsub"),
       opt_pre(options,_OTYPE_SELECT,"datesel2","na"),
       opt_pre(options,_OTYPE_SELECT,"datesel2","eq"),
       opt_pre(options,_OTYPE_SELECT,"datesel2","gt"),
       opt_pre(options,_OTYPE_SELECT,"datesel2","lt"),
       opt_pre(options,_OTYPE_TEXT,"dateval2"),
       opt_pre(options,_OTYPE_SELECT,"selop","na"),
       opt_pre(options,_OTYPE_SELECT,"selop","in"),
       opt_pre(options,_OTYPE_SELECT,"selop","out"),
       opt_pre(options,_OTYPE_CHECKBOX,"opA"),
       opt_pre(options,_OTYPE_CHECKBOX,"opO"),
       opt_pre(options,_OTYPE_CHECKBOX,"opM"),
       opt_pre(options,_OTYPE_CHECKBOX,"opC"),
       opt_pre(options,_OTYPE_CHECKBOX,"opW"),
       opt_pre(options,_OTYPE_CHECKBOX,"opG"),
       opt_pre(options,_OTYPE_CHECKBOX,"opF"),
       opt_pre(options,_OTYPE_CHECKBOX,"opR"),
       opt_pre(options,_OTYPE_CHECKBOX,"opT"),
       opt_pre(options,_OTYPE_CHECKBOX,"opU"),
       opt_pre(options,_OTYPE_CHECKBOX,"opP"),
       # so we submit to the same place!
       not options["cvsroot"] and _SELF_URL or
           (_SELF_URL + ("?cvsroot=%s" % options["cvsroot"])),
       # so we submit to the same place!
       not options["cvsroot"] and "" or
           ('' % options["cvsroot"]),
       # paging
       offset + 1,
       cend,
       ofstr,
       pstr,
       nstr,
       _TABLETOP,
       )

    options["offset"] = "%d" % offset

    for x in xrange(0,len(_COLUMNS)):
        if ( (PERFORMANCE == MODE_SLOW and _COLUMNS[x] == sortby) or
             (PERFORMANCE == MODE_FAST and _COLUMNS[x] == "Date") ):
            print '''%s ''' % (_SORTEDCOL, _COLUMNS[x])
        elif PERFORMANCE == MODE_FAST:
            print '''%s ''' % (_REGCOL, _COLUMNS[x])
        else:
            print '''%s ''' % (_REGCOL, _SELF_URL,
                               getstring(options,["sortby",_COLUMNS[x]]),
                               _COLUMNS[x])

    print "\n"

    for x in xrange(srange,erange):
        fn = data[x][5]
        dn = data[x][3]
        if fn == dn:
            fn = "N/A"
        elif INTEGRATION:
            fn = '%s' % ( _SCRIPT, dn, fn, fn )
        if INTEGRATION:
            dn = '%s%s' % ( _SCRIPT, dn, _ICON, dn )
        rev = data[x][4]
        if rev == '':
            rev = "N/A"

        print \
''' %s %s %s %s %s %s ''' % (_ROWS[x % 2], # background color
       time.strftime(TIMEFORMAT,time.localtime(data[x][2])), # Date
       data[x][1],          # user
       op2text(data[x][0]), # operation
       dn,                  # directory
       fn,                  # file/module
       rev,                 # revision
       )
    print \
'''RSS''' % cgi.escape(rssURL)

    if PUBLIC_SERVER:
        print ' Subscribe' % urllib.quote (rssURL)

    print \
''' Powered by
CVSHistory %s
''' % _VERSION

def getstring(options,add=None,ignore=[]):
    if add:
        options[add[0]] = add[1]
    gs = []
    for k in options.keys():
        if not k in ignore:
            gs.append("%s=%s" % (k,urllib.quote_plus(options[k])))
    return cgi.escape("&".join(gs), 1)

def get_options():
    form = cgi.FieldStorage()
    opts = {}
    for o in form.keys():
        d = form[o].value
        if type(d) == type([]):
            d = d[0]
        opts[o] = d
    if not opts.has_key("cvsroot") or not HISTORY.has_key(opts["cvsroot"]):
        opts["cvsroot"] = ""
    return opts

def sm_user(a,b):
    if a[1].upper() < b[1].upper():
        return -1
    if a[1].upper() == b[1].upper():
        return 0
    return 1

def sm_op(a,b):
    oa = op2text(a[0])
    ob = op2text(b[0])
    if oa < ob:
        return -1
    if oa == ob:
        return 0
    return 1

def sm_dir(a,b):
    if a[3].upper() < b[3].upper():
        return -1
    if a[3].upper() == b[3].upper():
        return 0
    return 1

def sm_fn(a,b):
    if a[3] == a[5]:
        return 1
    if a[5].upper() < b[5].upper():
        return -1
    if a[5].upper() == b[5].upper():
        return 0
    return 1

def str_to_float(a):
    if a == '':
        return 0.0
    return float(a)

def cmp_rev(a,b):
    al = map(str_to_float, a.split('.'))
    bl = map(str_to_float, b.split('.'))
    if al > bl:
        return 1
    if al < bl:
        return -1
    return 0

def sm_rev(a,b):
    return cmp_rev(a[4],b[4])

_SORTMETHODS = {
    "User"      : sm_user,
    "Operation" : sm_op,
    "Directory" : sm_dir,
    "File"      : sm_fn,
    "Revision"  : sm_rev,
}

#conds = [
_CTYPE_MATCH   = 1
_CTYPE_REMATCH = 2
_CTYPE_DATE    = 3
_CTYPE_REV     = 4
_CTYPE_OPS     = 5

_ARG2_OUT    = 0
_ARG2_IN     = 1
_ARG2_EQ     = 2
_ARG2_GT     = 3
_ARG2_LT     = 4
_ARG2_SEQ    = 5
_ARG2_NUM_GT = 6
_ARG2_NUM_LT = 7

class Condition:
    def __init__(self,type,dfield,arg,arg2=None):
        self.dfield = dfield
        self.error = None

        if type == _CTYPE_MATCH:
            self.arg = arg.strip()
            self.test = self.MATCH_test

        elif type == _CTYPE_REMATCH:
            try:
                self.arg = re.compile(arg)
                self.test = self.REMATCH_test
            except:
                self.error = "invalid regular expression"

        elif type == _CTYPE_DATE:
            try:
                arg = arg.strip()
                arg = re.sub(" +"," ",arg)
                if arg.count(" "):
                    self.arg = time.mktime(time.strptime(arg,TIMEFORMAT))
                    longdate = 1
                else:
                    self.arg = time.mktime(time.strptime(arg,TIMEFORMAT[:-6]))
                    longdate = 0
                self.test = self.COMP_test
            except:
                self.error = "invalid date"

            if arg2 == "eq":
                if longdate:
                    self.arg2 = _ARG2_EQ
                else:
                    self.arg2 = _ARG2_SEQ
            elif arg2 == "gt":
                self.arg2 = _ARG2_GT
            elif arg2 == "lt":
                self.arg2 = _ARG2_LT
            else:
                self.error = "unexpected comparison specification for date"

        elif type == _CTYPE_REV:
            self.arg = arg
            self.test = self.COMP_test
            if arg2 == "eq":
                self.arg2 = _ARG2_EQ
            elif arg2 == "gt":
                self.arg2 = _ARG2_NUM_GT
            elif arg2 == "lt":
                self.arg2 = _ARG2_NUM_LT
            else:
                self.error = "unexpected comparison specification for revisions"

        elif type == _CTYPE_OPS:
            self.arg = arg
            self.test = self.OPS_test
            if arg2 == "in":
                self.arg2 = _ARG2_IN
            elif arg2 == "out":
                self.arg2 = _ARG2_OUT
            else:
                self.error = "unexpected argument for operation exclusion/inclusion"

    def MATCH_test(self,dataitem):
        if dataitem[self.dfield] == self.arg:
            return 1
        return 0

    def REMATCH_test(self,dataitem):
        if self.arg.search(dataitem[self.dfield]):
            return 1
        return 0

    def COMP_test(self,dataitem):
        if self.arg2 == _ARG2_SEQ:
            # we have a date...
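            # _ARG2_SEQ is the "same day" comparison used when the form gave a
            # date without a time: the entry's hour/minute/second are zeroed
            # before comparing against the midnight timestamp strptime
            # produced, so e.g. (assuming ISOTIME) "2004-05-01" matches any
            # entry made on that day.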
            dt = time.localtime(dataitem[self.dfield])
            mt = dt[:3] + (0,0,0) + dt[6:]
            if time.mktime(mt) == self.arg:
                return 1
            return 0
        elif self.arg2 == _ARG2_EQ:
            if dataitem[self.dfield] == self.arg:
                return 1
            return 0
        elif self.arg2 == _ARG2_GT:
            if dataitem[self.dfield] > self.arg:
                return 1
            return 0
        elif self.arg2 == _ARG2_LT:
            if dataitem[self.dfield] < self.arg:
                return 1
            return 0
        elif self.arg2 == _ARG2_NUM_GT:
            if cmp_rev(dataitem[self.dfield], self.arg) == 1:
                return 1
            return 0
        elif self.arg2 == _ARG2_NUM_LT:
            if cmp_rev(dataitem[self.dfield], self.arg) == -1:
                return 1
            return 0
        return 0

    def OPS_test(self,dataitem):
        if dataitem[self.dfield] in self.arg:
            return 1 * self.arg2 # will 0 out if OUT
        return 1 - self.arg2 # inverse

def get_conds(opts):
    conds = []

    if not HISTORY.get(opts["cvsroot"]):
        return "Error: unknown cvsroot: '%s'" % opts["cvsroot"]

    for opt in opts.keys():
        madecond = 0

        # username
        if opt == "usearch" and opts[opt].strip() != "":
            madecond = 1
            if opts.has_key("usearchre") and opts["usearchre"] == "on":
                conds.append(Condition(_CTYPE_REMATCH,1,opts["usearch"]) )
            else:
                conds.append(Condition(_CTYPE_MATCH,1,opts["usearch"]) )

        if opt == "fsearch" and opts[opt].strip() != "":
            madecond = 1
            if opts.has_key("fsearchre") and opts["fsearchre"] == "on":
                conds.append(Condition(_CTYPE_REMATCH,5,opts["fsearch"]) )
            else:
                conds.append(Condition(_CTYPE_MATCH,5,opts["fsearch"]) )

        elif opt == "dsearch" and opts[opt].strip() != "":
            madecond = 1
            if opts.has_key("dsearchre") and opts["dsearchre"] == "on":
                conds.append(Condition(_CTYPE_REMATCH,3,opts["dsearch"]) )
            else:
                conds.append(Condition(_CTYPE_MATCH,3,opts["dsearch"]) )

        elif opt == "revsel1" and opts[opt].strip() != "na":
            madecond = 1
            if not opts.has_key("revval1") or opts["revval1"].strip() == "":
                return "Error processing revision: please include revision value"
            conds.append(Condition(_CTYPE_REV,4,opts["revval1"],opts["revsel1"]) )

        elif opt == "revsel2" and opts[opt].strip() != "na":
            madecond = 1
            if not opts.has_key("revval2") or opts["revval2"].strip() == "":
                return "Error processing revision: please include revision value"
            conds.append(Condition(_CTYPE_REV,4,opts["revval2"],opts["revsel2"]) )

        elif opt == "datesel1" and opts[opt].strip() != "na":
            madecond = 1
            if not opts.has_key("dateval1") or opts["dateval1"].strip() == "":
                return "Error processing date: please include date value"
            conds.append(Condition(_CTYPE_DATE,2,opts["dateval1"],opts["datesel1"]) )

        elif opt == "datesel2" and opts[opt].strip() != "na":
            madecond = 1
            if not opts.has_key("dateval2") or opts["dateval2"].strip() == "":
                return "Error processing date: please include date value"
            conds.append(Condition(_CTYPE_DATE,2,opts["dateval2"],opts["datesel2"]) )

        elif opt == "selop" and opts["selop"] != "na":
            madecond = 1
            ops_arg = []
            for inkey in opts.keys():
                if inkey[:2] == "op" and opts[inkey] == "on":
                    ops_arg.append(inkey[2])
            conds.append(Condition(_CTYPE_OPS,0,"".join(ops_arg),opts["selop"]) )

        if madecond and conds[-1].error:
            return "Error processing form input: " + conds[-1].error

    return conds

def limit(data,conds):
    x = 0
    while x < len(data):
        for c in conds:
            if not c.test(data[x]):
                del data[x]
                x -= 1
                break
        x += 1
    return data

def correct_opts(opts):
    # revision
    if opts.has_key("revsel1") and opts["revsel1"] == "na":
        opts["revval1"] = ""
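    # (Blanking stale values matters because pretty_print() echoes opts back
    # into the search form via opt_pre(); once a selector is back on "n/a"
    # its old value should no longer reappear.)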
opts.has_key("revsel2") and opts["revsel2"] == "na": opts["revval2"] = "" if opts.has_key("datesel1") and opts["datesel1"] == "na": opts["dateval1"] = "" if opts.has_key("datesel2") and opts["datesel2"] == "na": opts["dateval2"] = "" if opts.has_key("selop") and opts["selop"] == "na": for ok in opts.keys(): if ok[:2] == "op": opts[ok] = "off" return opts def error(mess): print \ '''Content-Type: text/html\r \r CVSHistory -- Error

Error

CVSHistory encountered an error on form input:
%s''' % cgi.escape(mess)

def go_slow():
    opts = get_options()
    data = get_history(opts)

    # fix directory recursion
    if opts.has_key("dsearchsub") and opts["dsearchsub"] == "on":
        if not opts.has_key("dsearchre") or opts["dsearchre"] != "on":
            opts["dsearch"] = re.escape(opts["dsearch"])
            opts["dsearchre"] = "on"
        opts["dsearch"] = "^" + opts["dsearch"] + ".*"
        opts["dsearchsub"] = "off"

    printed = 0
    conds = None
    if opts.has_key("limit") and opts["limit"] == "1":
        conds = get_conds(opts)
        if type(conds) == type(""):
            error(conds)
            printed = 1
        else:
            opts = correct_opts(opts)
            data = limit(data,conds)

    # sorting
    if not printed:
        if opts.has_key("sortby") and opts["sortby"] != "Date":
            data.sort(_SORTMETHODS[opts["sortby"]])
        pretty_print(data,opts)

# fast mode
def go_fast():
    opts = get_options()

    # fix directory recursion
    if opts.has_key("dsearchsub") and opts["dsearchsub"] == "on":
        if not opts.has_key("dsearchre") or opts["dsearchre"] != "on":
            opts["dsearch"] = re.escape(opts["dsearch"])
            opts["dsearchre"] = "on"
        opts["dsearch"] = "^" + opts["dsearch"] + ".*"
        opts["dsearchsub"] = "off"

    printed = 0
    conds = []
    if opts.has_key("limit") and opts["limit"] == "1":
        conds = get_conds(opts)
        if type(conds) == type(""):
            error(conds)
            printed = 1
        else:
            opts = correct_opts(opts)

    if not printed:
        data = get_history_fast(conds,opts)
        pretty_print(data,opts)

if __name__ == "__main__":
    if PERFORMANCE == MODE_FAST:
        go_fast()
    if PERFORMANCE == MODE_SLOW:
        go_slow()
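# Deployment sketch (an assumption, not part of the original distribution):
# the script is meant to run as a CGI under a web server, e.g. with Apache
# something along the lines of
#
#   ScriptAlias /cvshistory "/usr/local/cvshistory/cvshistory.cgi"
#
# with CONFIGFILE at the top pointed at a readable cvshistory.conf.  The
# account the web server runs as only needs read access to each configured
# CVSROOT/history file.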