#!/usr/bin/env python
# cvslog2web by Ethan Tira-Thompson
# Released under the GPL (http://www.gnu.org/copyleft/gpl.html)
# $Date: 2009/01/06 19:59:07 $
# Provides syndicated (Atom) and HTML output from CVS commit logs
SCRIPT_REVISION="$Revision: 1.16 $"
SCRIPT_URL="http://ethan.tira-thompson.com/cvslog2web"
################################################################################
################ INSTALLATION ################
################################################################################
# To install this script, copy it into the CVSROOT directory of your
# repository, and then add the following line to CVSROOT/loginfo:
# ALL python $CVSROOT/CVSROOT/cvslog2web.py $CVSROOT %{sVv} [config-file]
# Don't forget you can replace 'ALL' with a filter to only apply
# cvslog2web to certain modules, or apply different copies of cvslog2web
# (presumably with different configuration settings) to different modules
# The optional config file can hold the configuration parameters shown below
# This is convenient to use the same script with different settings for
# different modules (as opposed to copying the script itself)
# You probably also received some stylesheet and images. The default
# placement for these files depends on the directory layout of your website:
# Atom.css -- same directory as the feed output (FEEDOUT)
# cvslog2web.css -- HTMLOUT_CSS and PERMALINK_CSS, default to same directory as
# the html page (HTMLOUT), and root of the permalink structure respectively
# nav_*.gif -- same directory as PERMALINK_CSS
# *.png -- Root of permalink structure (PERMALINK_URL_PREFIX)
################################################################################
################ CONFIGURATION ################
################################################################################
import os,sys
# Given the command line suggested above:
# arg 0 will be the script path (unused)
# root/args are only defined here when invoked by CVS with enough arguments;
# the rebuild path (len(sys.argv)==2) assigns them in the LOAD EXTERNAL section
if len(sys.argv)>2:
    root=sys.argv[1] # arg 1 will be the repository root (CVSROOT)
    args=sys.argv[2].split(" ") # arg 2 is the directory of the commit followed by a list of files (space delimited)
# arg 3 is an optional configuration file parameter, which will override the defaults shown below
# arg 3 is handled at the end of the configuration section
#### These first three settings control the destination of the output ####
#### Set to None or empty string ("") to disable that format's generation ####
# This is the directory to hold entries as individual html files
# If PERMALINKDIR is disabled, cannot generate links from the feed and HTML
# to permanent log entries (but that's perfectly legal to do)
# PERMALINKDIR should essentially be the root of your webserver, can use
# PERMALINK_STRUCTURE setting (below) to subdivide files into subdirectories
PERMALINKDIR="/Users/ejt/Sites/"
# where to direct the Atom feed output, relative paths are interpreted from PERMALINKDIR
FEEDOUT="cvs.xml"
# will hold the most recent MAXHISTORY entries, relative paths are interpreted from PERMALINKDIR
# This is intended as a quick list of recent entries, suitable for including into a larger page via SSI or frames
HTMLOUT="recent.html"
# Defaults sets the tmp directory to be created in the same location as the script
# This directory will hold status files between commits
# Be sure to think through moving this outside CVSROOT (all commiters need access to the same files)
TMPDIR=os.path.join(os.path.dirname(sys.argv[0]),"cvslog2web-tmp")
# maximum amount of time (seconds) in which checkins with the same message will be grouped together
TIMEOUT=15*60
# number of checkins to retain
# this will be reset to max(MAXHISTORY,FEED_MAXHISTORY,HTMLOUT_MAXHISTORY)
MAXHISTORY=0
# domain to use for entry id tags and default email addresses
DOMAIN="example.org"
# viewcvs integration -- links from output to diffs
# format has 4 fields available: filename, path, oldversion, newversion
DIFFLINKFORMAT="http://www.example.org/cgi/viewcvs.cgi/%(path)s.diff?r1=%(oldversion)s&r2=%(newversion)s"
# For adds and removes, no diff is available, one of oldversion or newversion
# will be set (depending on add or remove), and the other will be an empty string
# Note how this default handles this by running them both together so it doesn't matter which is set
VIEWLINKFORMAT="http://www.example.org/cgi/viewcvs.cgi/%(path)s?rev=%(oldversion)s%(newversion)s&content-type=text/vnd.viewcvs-markup"
# entry titles can be a file list or the first line of the commit message
TITLE_FILE_LIST, TITLE_MESSAGE_FIRST_LINE=range(2)
ENTRYTITLE=TITLE_MESSAGE_FIRST_LINE
# if using message as title (TITLE_MESSAGE_FIRST_LINE), can either
# repeat that line again in the content, or just skip it
REPEAT_ALWAYS, REPEAT_WHEN_MULTIPLE, REPEAT_NEVER=range(3)
REPEAT_FIRST_LINE=REPEAT_WHEN_MULTIPLE
# contact list, user names are looked up here, if not found, will fall
# back to information obtained from the system (password database and DOMAIN)
CONTACT={
"ejt":("Ethan Tira-Thompson","ejt@cs.cmu.edu"),
"guest":("Guest",None) #use None or empty string ('') to hide email
}
# Can drop the module name from paths in the file list
# Useful when you only have one module in the repository, or are filtering
# the input being passed to cvslog2web from the loginfo file
# Value is the depth to drop, so 0 drops nothing, 1 drops the first directory, etc.
DROP_MODULE=0
# controls whether feedback is given as each type of output is produced
VERBOSE=True
################ ATOM feed customization ################
FEEDTITLE="cvslog2web CVS" # title for the feed
FEEDDESCRIPTION="" # a short description of the feed
FEEDHOMELINK="http://www.example.org/" # a link to the "main" page
FEEDLOGO="" # spec says this image should be twice as wide as it is tall
FEEDICON="" # spec says this should be squared (as wide as tall)
FEEDENTRYPREFIX="Commit: " # prefix for entry titles in the feed output
# Controls generation of a list of tags for each file
# (not shown by many readers, and generally duplicates the file list in those that do.)
FEEDRELATED=False
FEED_MAXHISTORY=15
# FEEDSELFLINK *should* be set, but FEEDID *must* be set... if you don't
# provide FEEDSELFLINK, you will instead need to fill in FEEDID
# Further, the HTML index and permalink pages require the self link if you want them to contain links to the feed
# (why two separate settings? If you move the link, keep the link as the ID to maintain identity)
# These must be full, absolute URLs, not a relative path!
FEEDSELFLINK="http://www.example.org/cvs.xml" #self-link for the feed
FEEDID="" # a globally unique ID for the feed -- if empty, will be set to FEEDSELFLINK
################ HTML output customization ################
HTMLOUT_STANDALONE=True # whether to make the root element to stand alone (vs. included via server-side include)
HTMLOUT_CSS="cvslog2web.css" # style sheet to include (only applies if HTMLOUT_STANDALONE is True)
HTMLTITLE="" # title for the html output, will fall back to FEEDTITLE if blank, use None to disable
HTMLOUT_ENTRYPREFIX="" # prefix for title lines; does *not* fall back on FEEDENTRYPREFIX
HTMLOUT_MESSAGE=True # set to False to skip the log body
HTMLOUT_FILELIST=True # set to False to skip the file list (also skipped if ENTRYTITLE==TITLE_FILE_LIST )
HTMLOUT_MAXHISTORY=10
# Prefix author list with the given string in a nested SPAN of class
# 'cvslog2web_authorsprefix' and don't use parenthesis around names.
# Works best if CSS drops Authors span onto separate line, e.g.
# by setting 'display: block'. If the string is empty, uses parenthesis
# around the comma-separated names with no prefix span.
HTMLOUT_AUTHORSPREFIX=""
# HTMLOUT_ORDER allows you to define the order of items in the summary
HO_TITLE,HO_AUTHORS,HO_TIMESTAMP,HO_MESSAGE,HO_FILELIST=range(5)
HTMLOUT_ORDER=[HO_TITLE,HO_AUTHORS,HO_MESSAGE,HO_FILELIST]
# Timestamp format if the HO_TIMESTAMP section is included (see above)
# according to 'strftime'. A space and a timezone indication is always
# appended to the string regardless of format; avoid '%Z' in the string.
HTMLOUT_TIMESTAMPFORMAT="%a. %B %d, %Y at %I:%M:%S %p"
# Optional prefix string, under class 'cvslog2web_timestampprefix' within
# the timestamp DIV.
HTMLOUT_TIMESTAMPPREFIX="Committed "
# Outer encapsulating DIV class for each entry in the list
HTMLOUT_OUTERCLASS="cvslog2web_entry"
# This will be used as the 'target' attribute for all links on the page.
# (Handy if using the HTML output within a frame on your site, and you want the
# diffs to load in another frame.) Ignored if empty, otherwise specify a frame
# name from your site or one of the keywords _self, _parent, _top, or _blank.
HTMLOUT_TARGET=""
################ permanent link pages customization ################
# Prefix for permalink URLs (structure/filename will be appended)
# Can be blank to use relative URLs from the feed/HTML pages
# Put any directory structure in PERMALINK_STRUCTURE, not here
# Only fill this in if you are specifying an absolute URL (i.e. starts with http://)
PERMALINK_URL_PREFIX=""
# Prefix for images; normally they reside in the same place as the permalinks
PERMALINK_IMG_PREFIX="" # if empty string, will fall back to PERMALINK_URL_PREFIX, use None to disable
# strftime format string for permalink files -- can spread among subdirs with '/'
# In a post-processing stage, microseconds are available via '%%(us)d' (with normal printf-style formatting, e.g. %%(us)06d)
PERMALINK_STRUCTURE="commits/%Y/%m/commit-%d-%H-%M-%S-%%(us)06d.html"
# style sheet, will fall back to PERMALINK_URL_PREFIX+HTMLOUT_CSS if blank, use None to disable
PERMALINK_CSS=""
# text to use in the prev/next buttons
PL_PREVTEXT="PREV"
PL_NEXTTEXT="NEXT"
# Mostly for debugging, causes all permalink pages in history to be regenerated
# Default causes full rebuild if the script is run directly (without cvs-provided arguments)
REBUILDPERMALINKS=len(sys.argv)<3
# PERMALINK_ORDER allows you to define the order of items on permalink pages
PL_AUTHORS,PL_TIMESTAMP,PL_MESSAGE,PL_FILELIST,PL_PREVLINK,PL_NEXTLINK,PL_FEEDLINK=range(7)
PERMALINK_ORDER=[PL_TIMESTAMP,PL_PREVLINK,PL_NEXTLINK,PL_AUTHORS,PL_MESSAGE,PL_FILELIST,PL_FEEDLINK]
# But check this out: you can also include an xhtml string to be written at
# any point in the page! If the string begins with '<' it is parsed as xml.
# If it starts with any other character it is escaped as plain text. It is
# not possible to have incomplete tags which span built-in elements.
# (The default inserts a bare newline, which is written out as inert
# whitespace text -- a harmless placeholder demonstrating the feature.
# Previously this literal was split across two physical lines, which is a
# syntax error in Python; the string content, a single newline, is preserved.)
PERMALINK_ORDER.insert(0,"\n")
# As with HTMLOUT_STANDALONE - should permalink pages be full HTML documents
# or just fragments for inclusion in a wider page template?
PERMALINK_STANDALONE=True
# No JavaScript in Permalink pages - standard HTML links cover most cases but
# an onClick attribute directing the window location to the same URL is added
# for "belt and braces", unless overridden.
PERMALINK_ADDJS=True
# Outer encapsulating class for each permalink file's main content
PERMALINK_OUTERCLASS="cvslog2web_entry"
# If defined as an array of three strings, each string is taken to be the URL
# of an image to use in place of the "A" (first array entry), "M" (second array
# entry) and "R" (third and last array entry) letters, which indicate where files
# are added, modified or removed respectively in permalink pages. If an empty
# array, the letters are used.
PERMALINK_STATUSICONS=[]
################ LOAD EXTERNAL ################
# load overrides from optional external configuration file (if specified)
try:
config_file=None
if len(sys.argv)>3:
config_file=os.path.join(os.path.dirname(sys.argv[0]),sys.argv[3])
if len(sys.argv)==2: # running outside loginfo, just regenerate
config_file=os.path.join(os.path.dirname(sys.argv[0]),sys.argv[1])
root=""
args=[]
if config_file:
if os.path.isfile(config_file):
execfile(config_file)
else:
sys.exit("cvslog2web could not find configuration file "+config_file)
except:
import traceback
print "The following exception occurred while processing external configuration file:"
traceback.print_exc()
print "Execution will continue with default settings"
if not FEEDID:
FEEDID=FEEDSELFLINK
if HTMLTITLE=="":
HTMLTITLE=FEEDTITLE
if PERMALINK_IMG_PREFIX=="":
PERMALINK_IMG_PREFIX=PERMALINK_URL_PREFIX
elif not PERMALINK_IMG_PREFIX:
PERMALINK_IMG_PREFIX="";
if PERMALINK_CSS=="":
if PERMALINK_URL_PREFIX:
PERMALINK_CSS=PERMALINK_URL_PREFIX+HTMLOUT_CSS
else:
PERMALINK_CSS="../"*PERMALINK_STRUCTURE.count("/")+HTMLOUT_CSS
# if these are already set to an absolute pathes, then these are no-ops
HTMLOUT=os.path.join(PERMALINKDIR,HTMLOUT)
FEEDOUT=os.path.join(PERMALINKDIR,FEEDOUT)
################################################################################
################ INPUT PARSING ################
################################################################################
# You don't want to change much below here...
# It's icky code from here on out.
import re, pickle, time, datetime, pwd
import xml.dom, xml.dom.minidom
MAXHISTORY=max(MAXHISTORY,FEED_MAXHISTORY,HTMLOUT_MAXHISTORY)
curtime=datetime.datetime.utcnow()
curtime_str=curtime.strftime("%Y-%m-%dT%H:%M:%S.%%06d+00:00") % curtime.microsecond
if os.path.exists(TMPDIR):
if not os.path.isdir(TMPDIR): sys.exit("cvslog2web: file blocking TMPDIR "+TMPDIR)
else:
os.makedirs(TMPDIR)
# verify CVSROOT
root=root.rstrip(os.sep) #strip any extra "/" at the end
if root and not os.path.isdir(root): sys.exit("cvslog2web: bad CVSROOT: "+root)
# Pull the cvslog2web script's version number out of the CVS keyword replacement
SCRIPT_VERSION=re.findall("[0-9.]+",SCRIPT_REVISION)
if len(SCRIPT_VERSION)==0:
sys.exit("cvslog2web: invalid SCRIPT_REVISION setting (no version number) "+SCRIPT_REVISION)
elif len(SCRIPT_VERSION)>1:
print "WARNING cvslog2web SCRIPT_REVISION contains multiple version strings?", SCRIPT_REVISION
SCRIPT_VERSION=SCRIPT_VERSION[0]
if len(sys.argv)<3:
status=os.sep
else:
status=sys.stdin.readline()[:-1]
if not status.startswith("Update of "): sys.exit("cvslog2web: unrecognized cvs output")
status=status[len("Update of "):]
if not status.startswith(root+os.sep): sys.exit("cvslog2web: commit outside repository?")
cidir=status[len(root)+1:] #don't leave intro '/' on cidir
#first word is the directory, pop it off
cipop=""
while cipop!=cidir:
if len(args)==0: sys.exit("cvslog2web: Unable to parse cvs output")
cipop=os.path.join(cipop,args.pop(0))
del cipop
# test to see if this is the result of an import vs. regular commit
if len(sys.argv)>=3 and sys.argv[2].endswith(" - Imported sources"):
files=args[:-len(" - Imported sources")]
imported=True
else:
# args (set from command line) is a string with a series of filename,oldvers,newvers values
# this regular expression parses the string into a list of tuples
# This RE is smart enough to handle filenames with spaces or commas in them!
files=re.findall("(.*?),([0-9.]+|NONE),([0-9.]+|NONE) "," ".join(args)+" ")
imported=False
# Translate a dotted decimal revision string into a list of integers,
# e.g. "1.2.3" -> [1,2,3].  The list form compares correctly, whereas
# lexicographic comparison of the raw strings would get e.g. "1.10" < "1.9".
def version2list(s):
    """Return the integer components of revision string s; "NONE" -> []."""
    if s!="NONE":
        return [int(piece) for piece in s.split(".")]
    return []
# Now this next bit converts files into a dictionary mapping of
# names to [oldversion,newversion] lists, using the list form of version numbers
# (we're going to add on to the list of values for each file)
# x[0] is the filename; x[1:] are the (oldvers,newvers) strings from the RE above
files=dict([(x[0], map(version2list,x[1:])) for x in files])
# Enough of the command line arguments, now parse the stdin message
# We still need to get the status flag (add/modify/remove) for each file
# First we need to get the list of file names in each section
# Each of these sections at this point is just a space-delimited list of file names
added=[]; modified=[]; removed=[]; message=[]; importedFiles=[]; importedTag=""
if len(sys.argv)>=3:
    line=sys.stdin.readline()
    section=[] #empty initial section
    # simple line-by-line state machine: 'section' points at whichever list
    # is currently accumulating lines
    while line:
        if section is message:
            if imported and line=="Status:\n":
                # end of the import's log message; collapse it to one string
                message=["".join(message).strip()+"\n"]
                section=[]
            else:
                section.append(line)
        elif line=="Log Message:\n":
            section=message
        elif line=="Added Files:\n":
            section=added
        elif line=="Modified Files:\n":
            section=modified
        elif line=="Removed Files:\n":
            section=removed
        elif line.strip().startswith("Tag:"):
            pass #branch tag, currently no-op... eventually it would be nice to store and track this
        elif imported and (line.startswith("Vendor Tag:") or line.startswith("Release Tags:")):
            message.append(line)
            section=importedFiles
            if line.startswith("Release Tags:"):
                importedTag=line[len("Release Tags:"):].strip()
        elif section is importedFiles:
            # import file lines look like "N path/to/file"; keep (letter,name)
            m=re.findall("([A-Z]) (.*)",line[:-1])
            if len(m)==1:
                importedFiles.append(m[0])
        else:
            section.append(line[1:-1]) #strip initial tab and trailing newline/linefeed
        line=sys.stdin.readline()
# collapse the accumulated sections into plain strings
# (all empty in rebuild mode, which never reads stdin)
added="".join(added)
modified="".join(modified)
removed="".join(removed)
message="".join(message).strip()
importedFiles.sort(lambda x,y: cmp(x[1],y[1]))
# Don't do anything with new directories (everyone runs with 'update -dP' anyway right?)
# Directories don't matter until there's something in them
if len(sys.argv)>=3 and sys.argv[2].endswith(" - New directory"):
    # waited this long because CVS throws a hissy fit ("broken pipe...")
    #if you don't read the log message before quitting
    sys.exit()
# Constants for symbolic reference to information
ADDED="add"
MODIFIED="mod"
REMOVED="del"
GHOST="ghost" # this comes up later... we won't get this directly in the input
# We have the file names from the command line in 'files' (parsed above).
# Now we need to see which files are in which status section.
# A bit tricky because each space could be separating files
# or could be part of a filename itself.
# Spaces are such a pain, especially when they are being used
# as the delimiter, and your input doesn't escape the "real" spaces! Grrr.
# *** Still not perfect... if "foo" and "foo bar" are both
# involved, this could confuse it. ***
def processFiles(l,tag,out):
    # Match the space-delimited names in section text 'l' against the keys of
    # the module-level 'files' dict; for each match, append the status 'tag'
    # to that file's [oldversion,newversion] list and record the full path
    # (prefixed with the commit directory 'cidir') in 'out'.
    # Words that don't match are buffered in 'partial' in case they are
    # pieces of a filename that itself contains spaces.
    partial=""
    for f in l.split(" "):
        f=partial+f
        if f in files:
            files[f].append(tag)
            out[os.path.join(cidir,f)]=tuple(files[f])
            partial=""
        else:
            partial=f+" "
    if partial.strip():
        print "WARNING: partial filename in", tag, "section: '"+partial.strip()+"'"
paths={}
processFiles(added,ADDED,paths)
processFiles(modified,MODIFIED,paths)
processFiles(removed,REMOVED,paths)
# now paths is a dictionary mapping full path to (oldv,newv,status) tuples
# We've got our input regarding the current log entry,
# need to compare that against previous entry and see if it's
# all part of the same commit (since this script will be called
# separately for each directory involved in the commit, but
# we want them all to be associated in the same log entry)
# load the last checkin's message and time, with a default value if file not found
def readfile(name,default):
    """Unpickle and return TMPDIR/name, or 'default' if it cannot be opened.

    Was a bare 'except:' (which also swallowed KeyboardInterrupt/SystemExit);
    now only open() failures fall back to the default.  The handle is closed
    in a finally block so it is no longer leaked if pickle.load raises.
    """
    try:
        f=open(os.path.join(TMPDIR,name),"rb")
    except (IOError,OSError):
        return default #doesn't exist (e.g. first run), that's ok
    try:
        return pickle.load(f)
    finally:
        f.close()
# lasttime is the datetime object from the last call to the script
lasttime=readfile("lasttime",curtime-datetime.timedelta(0,TIMEOUT))
# history is everything we know about each entry, up to MAXHISTORY long
# The format is described in the next several lines
history=readfile("history",[])
# These enumerations define the basic fields in each entry in history
PATH,DATETIME,TIMESTAMP,AUTHORS,MESSAGE,IMPORTEDFILES,IMPORTEDTAG=range(7)
### History format ###
# History is a pretty major, and somewhat complex structure.
# It's basically just a list of entries, where each entry is a list indexed
# by the enumerations listed above, defining basic format the log entries
# PATH element holds the 'paths' variable (dictionary mapping paths to version numbers and status flag)
# DATETIME is the datetime object of the entry's initial commit (used to derive its unique feed ID and permalink)
# TIMESTAMP is the last-updated timestamp as an ISO 8601 *string* (see curtime_str)
# AUTHORS is a dictionary mapping usernames to (name,email) tuples (both strings)
# MESSAGE is a string holding the log message read from stdin
# IMPORTEDFILES holds the (status-letter,filename) tuples of a cvs import (empty otherwise)
# IMPORTEDTAG holds the release tag string of a cvs import (empty otherwise)
######################
# get the user's name and username
# This might be a unix-only feature, not the end of the world if you
# have to rely on the contact list in the configuration section, or
# substitute another method
pw_db=pwd.getpwuid(os.getuid())
user=pw_db[0] # the "short" name, e.g. 'ejt'
user_name=pw_db[4] # the "full" name (GECOS field), e.g. 'Ethan Tira-Thompson'
# If within the TIMEOUT and have the same message, merge
# the current file list with the first entry of the history
# Otherwise, add a new entry (popping old entries if len(history)>MAXHISTORY...)
droppedHistory=[] # stores popped entries, reused when rebuilding permalinks
if len(sys.argv)<3:
    pass # rebuild shouldn't change any history entries
elif curtime-lasttime<datetime.timedelta(0,TIMEOUT) and len(history)>0 and history[0][MESSAGE]==message and not imported:
    # (The condition above was corrupted to 'curtime-lasttime0 and ...';
    # reconstructed from the comment and the TIMEOUT/lasttime defaults:
    # merge only when within TIMEOUT seconds of the previous invocation.)
    # merge is a little interesting:
    # If the file is added and then modified with the same log message,
    # merge as still added, just with the later revision number
    # Removed and re-added is modified
    # Similarly, modified then removed is just removed
    # However, added then removed is as if never existed, but still need
    # to store file info, (in case of re-add) hence the "ghost" status
    def merge(v1,v2):
        """Combine two (oldversion,newversion,status) records for one file."""
        ov=min(v1[0],v2[0])
        nv=max(v1[1],v2[1])
        if v1[2]==v2[2]:
            t=v1[2]
        elif v1[2]==ADDED and v2[2]==REMOVED:
            t=GHOST
        elif v1[2]==REMOVED and v2[2]==ADDED:
            t=MODIFIED
        elif v1[2]==ADDED or v2[2]==ADDED:
            t=ADDED
        elif v1[2]==REMOVED or v2[2]==REMOVED:
            t=REMOVED
        else:
            t=GHOST
        return (ov,nv,t)
    for k,v in paths.iteritems():
        if history[0][PATH].setdefault(k,v)!=v:
            history[0][PATH][k]=merge(history[0][PATH][k],v)
    history[0][TIMESTAMP]=curtime_str
    history[0][AUTHORS][user]=CONTACT.get(user,(user_name,user+"@"+DOMAIN))
else: # push paths as a new entry on its own
    # fall back to the full name (user_name), matching the merge branch above
    # (previously used the short username here, inconsistently)
    authors={user:CONTACT.get(user,(user_name,user+"@"+DOMAIN))}
    history.insert(0,[paths,curtime,curtime_str,authors,message,importedFiles,importedTag])
while len(history)>MAXHISTORY: droppedHistory.append(history.pop())
################################################################################
################ STATUS STORAGE ################
################################################################################
# We want to write out the history as soon as possible to reduce the risk
# of a processing error causing us to drop or corrupt an entry. At least
# once it's stored in the file, if there's an error I can debug it and then
# we can regenerate the output.
if len(sys.argv)>=3: # don't touch files if it's a rebuild
    # persist the invocation time (for the TIMEOUT merge window) ...
    f=open(os.path.join(TMPDIR,"lasttime"),"wb")
    pickle.dump(curtime,f)
    f.close()
    # ... and the full history structure
    f=open(os.path.join(TMPDIR,"history"),"wb")
    pickle.dump(history,f)
    f.close()
################################################################################
################ FUNCTION DECLARATIONS ################
################################################################################
# shorthand for adding a subnode with a particular name and textual content
# Appends <name>str</name> plus a trailing newline text node to 'node' and
# returns the new element (so callers can add attributes to it).
# Relies on the module-level 'doc' being the current DOM document.
# NOTE: the parameter 'str' shadows the builtin of the same name.
def appendTextualTag(node,name,str):
    n=node.appendChild(doc.createElement(name))
    n.appendChild(doc.createTextNode(str))
    node.appendChild(doc.createTextNode("\n"))
    return n
# quick-n-dirty version of set(), when used with reduce()
def collect(x,y):
    """reduce() accumulator: append y to list x unless already present, return x."""
    if y in x:
        return x
    x.append(y)
    return x
# removes first DROP_MODULE directory names from path
def dropModules(path):
    """Strip the leading DROP_MODULE path components from path.

    Returns "" when path has no more than DROP_MODULE components.
    (DROP_MODULE is the module-level configuration setting.)
    """
    for _ in range(DROP_MODULE):
        cut=path.find(os.sep)
        if cut==-1:
            return ""
        path=path[cut+1:]
    return path
# Converts a list of file names to a more compact form, grouping
# those in directories together:
# /foo/bar and /foo/baz become /foo/{bar,baz}
# (Python 2 idioms throughout: builtin reduce(), cmp-style sort, list filter())
def makeStr(names):
    # unique parent directories, deepest (longest string) first so files are
    # claimed by their most specific shared directory
    dirs=reduce(collect,[os.path.dirname(x) for x in names],[])
    dirs.sort(lambda x,y:-cmp(len(x),len(y))) #go from longest (deepest) to shortest (shallowest)
    common={} # maps (module-stripped) directory -> list of names within it
    remain=names[:] # names not yet grouped under a shared directory
    for d in dirs:
        paths=filter(lambda x: x.startswith(d),remain)
        paths.sort()
        if len(paths)>1:
            # two or more entries share this directory: group them
            for p in paths:
                common.setdefault(dropModules(d),[]).append(p[len(d)+1:])
                remain.remove(p)
    # leftovers are singletons, keyed by their own (module-stripped) directory
    for p in remain:
        common.setdefault(dropModules(os.path.dirname(p)),[]).append(dropModules(p))
    ks=common.keys()
    ks.sort()
    strs=[]
    for k in ks:
        v=common[k]
        if len(v)>1:
            if k:
                strs.append(os.path.join(k,"{"+(",".join(v))+"}"))
            else:
                strs.append(", ".join(v))
        else:
            strs.append(v[0])
    return ", ".join(strs)
# This older version is a bit more simplistic, but doesn't
# handle singleton paths as nicely
#def makeStr(names):
# dirs=reduce(collect,[os.path.dirname(x) for x in names],[])
# strs=[]
# for d in dirs:
# files=map(os.path.basename,filter(lambda x: x.startswith(d),names))
# if len(files)==1:
# strs.append(os.path.join(d,files))
# else:
# strs.append(os.path.join(d,"{"+",".join(files)+"}"))
# return " ".join(strs)
# convert version list to string representation
# [major, minor, patch, ...] -> major.minor.patch....
def vers2str(v):
    """Render a version list such as [1,2,3] back into dotted form "1.2.3"."""
    return ".".join([str(part) for part in v])
# this part computes titles and links for each entry in the history
# histFiles[i] is the "Added ...; Modified ...; Removed ..." summary for history[i]
histFiles=[]
for entry in history:
    # generate title string
    addstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==ADDED])
    modstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==MODIFIED])
    remstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==REMOVED])
    files=[]
    if len(addstr)>0:
        files.append("Added "+addstr)
    if len(modstr)>0:
        files.append("Modified "+modstr)
    if len(remstr)>0:
        files.append("Removed "+remstr)
    files="; ".join(files)
    histFiles.append(files)
# Links are a bit tricky; this will generate a dictionary of
# "common paths", where each key is the path, value is a list of
# files (may include singleton paths) within that path
# NOTE(review): reads the module-level 'entry' left over from the preceding
# for-loop (and rebound by the loops below) rather than taking it as a
# parameter -- callers must only invoke this inside a 'for entry in history'.
def makeLinks(paths):
    dirs=reduce(collect,[os.path.dirname(x) for x in paths],[])
    dirs.sort(lambda x,y:-cmp(len(x),len(y))) #go from longest (deepest) to shortest (shallowest)
    links={}
    remain=paths
    for d in dirs:
        paths=filter(lambda x: x.startswith(d),remain)
        if len(paths)>1:
            # two or more entries share this directory: group them under it
            paths.sort()
            for p in paths:
                info=entry[PATH][p]
                links.setdefault(dropModules(d),[]).append(dict(path=p,filename=p[len(d)+1:],oldversion=vers2str(info[0]),newversion=vers2str(info[1])))
                remain.remove(p)
    # leftovers are singletons, keyed by their own directory
    for p in remain:
        info=entry[PATH][p]
        links.setdefault(dropModules(os.path.dirname(p)),[]).append(dict(path=p,filename=dropModules(p),oldversion=vers2str(info[0]),newversion=vers2str(info[1])))
    return links
# histLinks[i] maps a prefix label ("Added: "/"Modified: "/"Removed: ", or
# "Diff: " when titles already list the files) to the link structure for history[i]
histLinks=[]
if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE:
    for entry in history:
        addlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==ADDED])
        modlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==MODIFIED])
        remlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==REMOVED])
        links={}
        if len(addlinks)>0:
            links["Added: "]=addlinks
        if len(modlinks)>0:
            links["Modified: "]=modlinks
        if len(remlinks)>0:
            links["Removed: "]=remlinks
        histLinks.append(links)
else:
    # file list is already in the title, so one combined diff section suffices
    for entry in history:
        histLinks.append({"Diff: ": makeLinks(entry[PATH].keys())})
def genPermalink(t):
    """URL of the permalink page for commit timestamp t (datetime).

    Expands PERMALINK_STRUCTURE via strftime, then fills in microseconds,
    and prefixes PERMALINK_URL_PREFIX (may be empty for relative URLs).
    """
    relative=t.strftime(PERMALINK_STRUCTURE) % {"us":t.microsecond}
    return PERMALINK_URL_PREFIX+relative
def genPermafile(t):
    """Filesystem path of the permalink page for commit timestamp t (datetime).

    Same expansion as genPermalink, but rooted at PERMALINKDIR with the
    URL-style '/' separators converted to the local os.sep.
    """
    relative=(t.strftime(PERMALINK_STRUCTURE) % {"us":t.microsecond}).replace("/",os.sep)
    return os.path.join(PERMALINKDIR,relative)
################################################################################
################ RSS (ATOM) OUTPUT ################
################################################################################
if FEEDOUT:
    # build the Atom document through the DOM so minidom handles all escaping
    dom=xml.dom.getDOMImplementation()
    doc=dom.createDocument("http://www.w3.org/2005/Atom","feed",None)
    feed=doc.documentElement
    doc.insertBefore(doc.createProcessingInstruction("xml-stylesheet",'href="Atom.css" type="text/css"'),feed)
    feed.setAttribute("xmlns","http://www.w3.org/2005/Atom")
    feed.appendChild(doc.createTextNode("\n"))
    # feed header tags
    appendTextualTag(feed,"id",FEEDID)
    appendTextualTag(feed,"title",FEEDTITLE)
    if FEEDHOMELINK:
        linknode=feed.appendChild(doc.createElement("link"))
        linknode.setAttribute("rel","alternate")
        linknode.setAttribute("href",FEEDHOMELINK)
        feed.appendChild(doc.createTextNode("\n"))
    if FEEDSELFLINK:
        linknode=feed.appendChild(doc.createElement("link"))
        linknode.setAttribute("rel","self")
        linknode.setAttribute("href",FEEDSELFLINK)
        feed.appendChild(doc.createTextNode("\n"))
    if FEEDLOGO:
        appendTextualTag(feed,"logo",FEEDLOGO)
    if FEEDICON:
        appendTextualTag(feed,"icon",FEEDICON)
    if FEEDDESCRIPTION:
        appendTextualTag(feed,"subtitle",FEEDDESCRIPTION)
    appendTextualTag(feed,"updated",curtime_str)
    generator=feed.appendChild(doc.createElement("generator"))
    generator.setAttribute("uri",SCRIPT_URL)
    generator.setAttribute("version",SCRIPT_VERSION)
    generator.appendChild(doc.createTextNode("cvslog2web"))
    feed.appendChild(doc.createTextNode("\n"))
    # one <entry> per history item, newest first
    for (entry,files,links) in zip(history,histFiles,histLinks)[:FEED_MAXHISTORY]:
        msg=entry[MESSAGE].splitlines()
        # entry header tags
        node=feed.appendChild(doc.createElement("entry"))
        node.appendChild(doc.createTextNode("\n"))
        t=entry[DATETIME]
        # stable tag: URI built from DOMAIN, commit date, and time-of-day
        # (seconds since midnight plus microseconds)
        appendTextualTag(node,"id",entry[DATETIME].strftime("tag:%%s,%Y-%m-%d:%%05d.%%06d")%(DOMAIN,t.hour*60*60+t.minute*60+t.second,t.microsecond))
        if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0:
            appendTextualTag(node,"title",FEEDENTRYPREFIX+msg[0])
        elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0:
            appendTextualTag(node,"title",FEEDENTRYPREFIX+files)
        else:
            sys.exit("cvslog2web: bad ENTRYTITLE setting")
        appendTextualTag(node,"updated",entry[TIMESTAMP])
        for n in entry[AUTHORS].itervalues():
            author=node.appendChild(doc.createElement("author"))
            author.appendChild(doc.createElement("name")).appendChild(doc.createTextNode(n[0]))
            if len(n)>1 and n[1]!="":
                author.appendChild(doc.createElement("email")).appendChild(doc.createTextNode(n[1]))
            node.appendChild(doc.createTextNode("\n"))
        if PERMALINKDIR:
            linknode=node.appendChild(doc.createElement("link"))
            linknode.setAttribute("rel","alternate")
            linknode.setAttribute("href",genPermalink(entry[DATETIME]))
            linknode.setAttribute("type","text/html")
        if FEEDRELATED:
            # optional per-file "related" links (diff or view depending on status)
            for ll in links.itervalues():
                for group in ll.itervalues():
                    for l in group:
                        linknode=node.appendChild(doc.createElement("link"))
                        linknode.setAttribute("rel","related")
                        linknode.setAttribute("title",l["path"])
                        if l["oldversion"] and l["newversion"]:
                            linknode.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
                        else:
                            linknode.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
                        linknode.setAttribute("type","text/html")
        # CONTENT section
        content=node.appendChild(doc.createElement("content"))
        content.setAttribute("type","xhtml")
        content=content.appendChild(doc.createElement("div"))
        content.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
        content.appendChild(doc.createTextNode("\n"))
        for i,m in enumerate(msg):
            # optionally skip the first message line when it already served as the title
            if i==0 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE and len(msg)==1):
                continue
            appendTextualTag(content,"div",m).setAttribute("style","padding:.25em 0;")
        # End of message embedding
        #now add links to content
        filelist=content.appendChild(doc.createElement("div"))
        if len(msg)>1:
            filelist.setAttribute("style","padding:.6em 0;")
        for pre,ll in links.iteritems():
            diffs=filelist.appendChild(doc.createElement("div"))
            diffs.setAttribute("style","padding:.25em 0;")
            diffs.appendChild(doc.createTextNode(pre))
            diffs=diffs.appendChild(doc.createElement("tt"))
            firstSet=True
            for k,group in ll.iteritems():
                if not firstSet:
                    diffs.appendChild(doc.createTextNode(", "))
                if len(group)>1 and k:
                    # grouped files rendered as dir/{a, b, c}
                    diffs.appendChild(doc.createTextNode(os.path.join(k,"{")))
                firstLink=True
                for l in group:
                    if not firstLink:
                        diffs.appendChild(doc.createTextNode(", "))
                    a=diffs.appendChild(doc.createElement("a"))
                    if l["oldversion"] and l["newversion"]:
                        a.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
                    else:
                        a.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
                    a.appendChild(doc.createTextNode(l["filename"]))
                    firstLink=False
                if len(group)>1 and k:
                    diffs.appendChild(doc.createTextNode("}"))
                firstSet=False
            filelist.appendChild(doc.createTextNode("\n"))
        #end of content
        node.appendChild(doc.createTextNode("\n"))
    f=open(FEEDOUT,"wb")
    doc.writexml(f)
    f.close()
    doc.unlink()
    if VERBOSE:
        print "Feed update successful"
################################################################################
################ HTML OUTPUT ################
################################################################################
# some functions for HTML output, shared by HTMLOUT and PERMALINK output
def HTMLHeader(dom,title,css,rootclass):
"""Build the shared XHTML 1.0 Transitional page skeleton.

Used by both the standalone HTMLOUT page and the permalink pages.
dom -- a DOM implementation (from xml.dom.getDOMImplementation())
title -- text for the <title> element
css -- stylesheet href, or empty to omit the <link rel="stylesheet">
rootclass -- class attribute for the content <div> appended to <body>
Returns (doc, rootdiv): the new document and the div callers fill with content.
"""
doctype=dom.createDocumentType("html","//W3C//DTD XHTML 1.0 Transitional//EN","http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd")
doc=dom.createDocument("http://www.w3.org/1999/xhtml","html",doctype)
html=doc.documentElement
html.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
html.appendChild(doc.createTextNode("\n"))
head=html.appendChild(doc.createElement("head"))
appendTextualTag(head,"title",title)
if css:
link=head.appendChild(doc.createElement("link"))
link.setAttribute("rel","stylesheet")
link.setAttribute("type","text/css")
link.setAttribute("href",css)
# NOTE(review): indentation was lost in this copy -- this newline append may
# belong inside the preceding "if"; confirm against the original revision.
head.appendChild(doc.createTextNode("\n"))
# Advertise the Atom feed so browsers/readers can auto-discover it.
if FEEDOUT and FEEDSELFLINK:
link=head.appendChild(doc.createElement("link"))
link.setAttribute("rel","alternate")
link.setAttribute("title","Atom Syndication Feed")
link.setAttribute("type","application/atom+xml")
link.setAttribute("href",FEEDSELFLINK)
head.appendChild(doc.createTextNode("\n"))
body=html.appendChild(doc.createElement("body"))
body.appendChild(doc.createTextNode("\n"))
rootdiv=body.appendChild(doc.createElement("div"))
body.appendChild(doc.createTextNode("\n"))
rootdiv.appendChild(doc.createTextNode("\n"))
rootdiv.setAttribute("class",rootclass)
return (doc,rootdiv)
def appendLinks(filelist,pre,ll):
    """Append one <p> of per-file repository links beneath an HTML node.

    filelist -- parent DOM element; a new <p> is appended under it
    pre -- status prefix text (rendered with class cvslog2web_filestatus)
    ll -- dict mapping a directory prefix to a list of link dicts, each
          carrying "filename", "oldversion", "newversion" plus the keys
          consumed by DIFFLINKFORMAT / VIEWLINKFORMAT
    """
    # Fix: derive the owning document from the target node rather than relying
    # on the caller's module-level 'doc' binding.  Every node here was created
    # by that same document, so behavior is unchanged, but the helper no longer
    # breaks if it is called outside the HTMLOUT section.
    doc=filelist.ownerDocument
    filelist=filelist.appendChild(doc.createElement("p"))
    appendTextualTag(filelist,"span",pre).setAttribute("class","cvslog2web_filestatus")
    firstSet=True
    for k,v in ll.iteritems():
        if not firstSet:
            filelist.appendChild(doc.createTextNode(", "))
        # Several files in the same directory k render brace-grouped: k/{a, b}
        if len(v)>1 and k:
            filelist.appendChild(doc.createTextNode(os.path.join(k,"{")))
        firstLink=True
        for l in v:
            if not firstLink:
                filelist.appendChild(doc.createTextNode(", "))
            a=filelist.appendChild(doc.createElement("a"))
            if HTMLOUT_TARGET:
                a.setAttribute("target",HTMLOUT_TARGET)
            # A diff needs both revisions; added/removed files get a view link.
            if l["oldversion"] and l["newversion"]:
                a.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
            else:
                a.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
            a.appendChild(doc.createTextNode(l["filename"]))
            firstLink=False
        if len(v)>1 and k:
            filelist.appendChild(doc.createTextNode("}"))
        firstSet=False
################ HTML STANDALONE OUTPUT ################
# Emit an HTML index of recent commits, either as a full standalone page or as
# an embeddable fragment (HTMLOUT_STANDALONE).
# NOTE(review): original indentation was lost in this copy; nesting below is
# implied by statement order, not column position.
if HTMLOUT:
dom=xml.dom.getDOMImplementation()
if not HTMLOUT_STANDALONE:
# Fragment output: no <html>/<head> wrapper, just a classed <div>.
if "createDocumentFragment" in dir(dom):
doc=dom.createDocumentFragment()
else:
# minidom's DOMImplementation lacks createDocumentFragment; use a full
# document and strip the XML declaration at write time (see below).
doc=dom.createDocument(None,None,None)
rootdiv=doc.appendChild(doc.createElement("div"))
rootdiv.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
rootdiv.appendChild(doc.createTextNode("\n"))
rootdiv.setAttribute("class","cvslog2web_index")
else:
doc,rootdiv=HTMLHeader(dom,HTMLTITLE,HTMLOUT_CSS,"cvslog2web_index")
# Render the newest HTMLOUT_MAXHISTORY entries, one outer div per commit.
for (entry,files,links) in zip(history,histFiles,histLinks)[:HTMLOUT_MAXHISTORY]:
msg=entry[MESSAGE].splitlines()
# entry header tags
node=rootdiv.appendChild(doc.createElement("div"))
node.setAttribute("class",HTMLOUT_OUTERCLASS)
node.appendChild(doc.createTextNode("\n"))
# HTMLOUT_ORDER selects which sections appear and in what order.
for section in HTMLOUT_ORDER:
if section==HO_TITLE:
# Title section: the title doubles as a permalink anchor when enabled.
titlenode=node.appendChild(doc.createElement("div"))
titlenode.setAttribute("class","cvslog2web_title")
if PERMALINKDIR:
a=appendTextualTag(titlenode,"a",HTMLOUT_ENTRYPREFIX)
if HTMLOUT_TARGET:
a.setAttribute("target",HTMLOUT_TARGET)
a.setAttribute("href",genPermalink(entry[DATETIME]))
if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0: appendTextualTag(a,"span",msg[0]).setAttribute("class","cvslog2web_message")
elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0: appendTextualTag(a,"span",files).setAttribute("class","cvslog2web_filelist")
else: sys.exit("cvslog2web: bad ENTRYTITLE setting")
else:
titlenode.appendChild(doc.createTextNode(HTMLOUT_ENTRYPREFIX))
if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0: appendTextualTag(titlenode,"span",msg[0]).setAttribute("class","cvslog2web_message")
elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0: appendTextualTag(titlenode,"span",files).setAttribute("class","cvslog2web_filelist")
else: sys.exit("cvslog2web: bad ENTRYTITLE setting")
titlenode.appendChild(doc.createTextNode(" "))
elif section==HO_AUTHORS:
# Authors section.
# NOTE(review): appends into titlenode, so HO_AUTHORS apparently must come
# after HO_TITLE in HTMLOUT_ORDER or titlenode is stale/undefined -- verify.
authors=titlenode.appendChild(doc.createElement("span"))
authors.setAttribute("class","cvslog2web_authors")
if HTMLOUT_AUTHORSPREFIX:
authorsprefix=authors.appendChild(doc.createElement("span"))
authorsprefix.setAttribute("class","cvslog2web_authorsprefix")
authorsprefix.appendChild(doc.createTextNode(HTMLOUT_AUTHORSPREFIX))
authors.appendChild(doc.createTextNode(",".join([k for k,n in entry[AUTHORS].iteritems()])))
else:
authors.appendChild(doc.createTextNode("("+",".join([k for k,n in entry[AUTHORS].iteritems()])+")"))
elif section==HO_TIMESTAMP:
# Commit date and time.
# NOTE(review): picks altzone/tzname[1] whenever the host zone *defines* DST
# (time.daylight), not whether the commit date itself falls in DST -- confirm
# this approximation is intentional.
if time.daylight:
t=entry[DATETIME]-datetime.timedelta(seconds=time.altzone)
tz=time.tzname[1]
else:
t=entry[DATETIME]-datetime.timedelta(seconds=time.timezone)
tz=time.tzname[0]
timestampstr=t.strftime(HTMLOUT_TIMESTAMPFORMAT+" "+tz)
timestamp=node.appendChild(doc.createElement("div"))
timestamp.setAttribute("class","cvslog2web_timestamp")
if HTMLOUT_TIMESTAMPPREFIX:
timestampprefix=timestamp.appendChild(doc.createElement("span"))
timestampprefix.setAttribute("class","cvslog2web_timestampprefix")
timestampprefix.appendChild(doc.createTextNode(HTMLOUT_TIMESTAMPPREFIX))
timestamp.appendChild(doc.createTextNode(timestampstr))
elif section==HO_MESSAGE:
# Content section: one <p> per message line, with the same first-line
# repeat suppression used in the feed output.
if HTMLOUT_MESSAGE:
content=node.appendChild(doc.createElement("div"))
content.setAttribute("class","cvslog2web_message")
content.appendChild(doc.createTextNode("\n"))
for i,m in enumerate(msg):
if i==0 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE and len(msg)==1):
continue
appendTextualTag(content,"p",m)
# Drop the container again if suppression left it empty (single-line message
# already shown as the title).
if len(msg)==1 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE):
node.removeChild(content)
elif section==HO_FILELIST:
# File list is redundant when the title already lists the files.
if len(links)>0 and HTMLOUT_FILELIST and ENTRYTITLE!=TITLE_FILE_LIST:
#now add links to content
filelist=node.appendChild(doc.createElement("div"))
filelist.setAttribute("class","cvslog2web_filelist")
filelist.appendChild(doc.createTextNode("\n"))
for pre,l in links.iteritems():
appendLinks(filelist,pre,l)
#end of content
node.appendChild(doc.createTextNode("\n"))
# Footer credit linking back to the cvslog2web project page.
gen=rootdiv.appendChild(doc.createElement("div"))
gen.setAttribute("class","cvslog2web_credit")
gen.appendChild(doc.createTextNode("Generated by "))
a=appendTextualTag(gen,"a","cvslog2web")
a.setAttribute("target","_top")
a.setAttribute("href",SCRIPT_URL)
gen.appendChild(doc.createTextNode(SCRIPT_VERSION))
f=open(HTMLOUT,"wb")
if HTMLOUT_STANDALONE or "createDocumentFragment" in dir(dom):
doc.writexml(f)
else:
# createDocumentFragment unavailable, hack it and strip the xml processing instruction
s=doc.toxml()
s=s[s.find("\n")+1:]
f.write(s)
f.close()
doc.unlink()
if VERBOSE:
print "HTML update successful"
################ HTML PERMALINK OUTPUT ################
def normalizeLink(link):
    """Adjust *link* for use from inside a permalink page.

    When no absolute PERMALINK_URL_PREFIX is configured, the link is made
    relative by stepping up one "../" per directory level implied by
    PERMALINK_STRUCTURE; otherwise it is returned untouched.
    """
    if PERMALINK_URL_PREFIX:
        # Absolute prefix configured: the link resolves from anywhere as-is.
        return link
    depth=PERMALINK_STRUCTURE.count("/")
    return "../"*depth+link
def permalinkStatusIcon(node,index,alt):
    """Append the file-status icon PERMALINK_STATUSICONS[index] under *node*.

    The icon is an <img> whose src is normalized for the permalink directory
    depth; *alt* supplies the fallback text (e.g. "A"/"M"/"R").
    """
    img=node.appendChild(doc.createElement("img"))
    img.setAttribute("src",normalizeLink(PERMALINK_STATUSICONS[index]))
    img.setAttribute("alt",alt)
def writePermalink(entry,files,links,prevLink="",nextLink=""):
"""Write the permalink page (standalone or fragment) for one history entry.

entry -- history record indexed by DATETIME/MESSAGE/AUTHORS/PATH/...
files -- display string listing the commit's files (used for the title)
links -- status-prefix -> link-group mapping; NOTE(review): appears unused
         here (the file list is rebuilt from entry[PATH]) and the name is
         shadowed in the PL_FILELIST section below -- verify.
prevLink/nextLink -- permalinks of neighboring entries, "" when absent
NOTE(review): original indentation was lost in this copy; nesting is implied
by statement order, not column position.
"""
if prevLink: prevLink=normalizeLink(prevLink)
if nextLink: nextLink=normalizeLink(nextLink)
# NOTE(review): 'permalink' appears unused in the visible body.
permalink=genPermalink(entry[DATETIME])
permafile=genPermafile(entry[DATETIME])
permdir=os.path.dirname(permafile)
# Ensure the dated output directory exists; bail if a plain file blocks it.
if os.path.exists(permdir):
if not os.path.isdir(permdir): sys.exit("cvslog2web: file blocking PERMALINKDIR "+permdir)
else:
os.makedirs(permdir)
msg=entry[MESSAGE].splitlines()
# entry header tags -- page title follows the same ENTRYTITLE rule as the feed.
if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0:
title=FEEDENTRYPREFIX+msg[0]
elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0:
title=FEEDENTRYPREFIX+files
else:
sys.exit("cvslog2web: bad ENTRYTITLE setting")
dom=xml.dom.getDOMImplementation()
# Fragment vs. standalone page, mirroring the HTMLOUT section above.
if not PERMALINK_STANDALONE:
if "createDocumentFragment" in dir(dom):
doc=dom.createDocumentFragment()
else:
doc=dom.createDocument(None,None,None)
rootdiv=doc.appendChild(doc.createElement("div"))
rootdiv.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
rootdiv.appendChild(doc.createTextNode("\n"))
rootdiv.setAttribute("class","cvslog2web_permalink")
else:
doc,rootdiv=HTMLHeader(dom,title,PERMALINK_CSS,"cvslog2web_permalink")
node=rootdiv.appendChild(doc.createElement("div"))
node.setAttribute("class",PERMALINK_OUTERCLASS)
node.appendChild(doc.createTextNode("\n"))
# PERMALINK_ORDER selects the sections; string entries are injected verbatim.
for section in PERMALINK_ORDER:
# Previous link (disabled style when there is no older entry)
if section==PL_PREVLINK:
n=node.appendChild(doc.createElement("div"))
if prevLink:
n.setAttribute("class","cvslog2web_nav_prev")
# Optional JS makes the whole nav box clickable, not just the <a>.
if PERMALINK_ADDJS: n.setAttribute("onClick","window.location.href='"+prevLink+"'")
a=n.appendChild(doc.createElement("a"))
a.setAttribute("href",prevLink)
a.appendChild(doc.createTextNode(PL_PREVTEXT))
else:
n.setAttribute("class","cvslog2web_nav_prev_disabled")
n.appendChild(doc.createTextNode(PL_PREVTEXT))
# Next link (disabled style when there is no newer entry)
elif section==PL_NEXTLINK:
n=node.appendChild(doc.createElement("div"))
if nextLink:
n.setAttribute("class","cvslog2web_nav_next")
if PERMALINK_ADDJS: n.setAttribute("onClick","window.location.href='"+nextLink+"'")
a=n.appendChild(doc.createElement("a"))
a.setAttribute("href",nextLink)
a.appendChild(doc.createTextNode(PL_NEXTTEXT))
else:
n.setAttribute("class","cvslog2web_nav_next_disabled")
n.appendChild(doc.createTextNode(PL_NEXTTEXT))
# Feed link badge + icon, only when a feed is actually published
elif section==PL_FEEDLINK and FEEDOUT and FEEDSELFLINK:
n=node.appendChild(doc.createElement("div"))
n.setAttribute("class","cvslog2web_feedlink")
a=n.appendChild(doc.createElement("a"))
a.setAttribute("href",FEEDSELFLINK)
i=a.appendChild(doc.createElement("img"))
i.setAttribute("src",normalizeLink(PERMALINK_IMG_PREFIX+"atom_feed.png"))
i.setAttribute("width","84")
i.setAttribute("height","15")
i.setAttribute("alt","Atom Badge")
a.appendChild(doc.createTextNode(" "))
i=a.appendChild(doc.createElement("img"))
i.setAttribute("src",normalizeLink(PERMALINK_IMG_PREFIX+"feed_icon.png"))
i.setAttribute("width","15")
i.setAttribute("height","15")
i.setAttribute("alt","Feed Icon")
# Title (Timestamp)
elif section==PL_TIMESTAMP:
# Same host-current-DST approximation as the HTMLOUT timestamp section.
if time.daylight:
t=entry[DATETIME]-datetime.timedelta(seconds=time.altzone)
tz=time.tzname[1]
else:
t=entry[DATETIME]-datetime.timedelta(seconds=time.timezone)
tz=time.tzname[0]
# NOTE(review): "Commited" is a typo in user-visible output; left unchanged
# here because it is a runtime string, not a comment.
title=t.strftime("Commited %a. %B %d, %Y at %I:%M:%S %p "+tz)
appendTextualTag(node,"div",title).setAttribute("class","cvslog2web_timestamp")
# Authors, each linked via mailto:
elif section==PL_AUTHORS:
authors=node.appendChild(doc.createElement("div"))
authors.setAttribute("class","cvslog2web_authors")
authors.appendChild(doc.createTextNode("\nfrom "))
first=True
# n[0]/n[1] usage implies AUTHORS values are (display name, email) pairs.
for n in entry[AUTHORS].itervalues():
if first: first=False
else: authors.appendChild(doc.createTextNode(", "))
l=appendTextualTag(authors,"a",n[0])
l.setAttribute("href","mailto:"+n[1])
# Message, one line per <br>-separated text node
elif section==PL_MESSAGE:
content=node.appendChild(doc.createElement("div"))
content.setAttribute("class","cvslog2web_message")
for i,m in enumerate(msg):
content.appendChild(doc.createTextNode("\n"+m))
content.appendChild(doc.createElement("br"))
content.appendChild(doc.createTextNode("\n"))
# Links: rebuilt from entry[PATH] with A/M/R status markers
elif section==PL_FILELIST:
# NOTE(review): this rebinds the 'links' parameter to a DOM div.
links=node.appendChild(doc.createElement("div"))
links.setAttribute("class","cvslog2web_filelist")
links.appendChild(doc.createTextNode("\n"))
spaths=entry[PATH].keys()
spaths.sort()
for path in spaths:
# info is (oldversion, newversion, status-flag) per the indexing below.
info=entry[PATH][path]
status=links.appendChild(doc.createElement("span"))
status.setAttribute("class","cvslog2web_filestatus")
# Use icons when all three status icons are configured, else plain letters.
if len(PERMALINK_STATUSICONS)==3:
if info[2]==ADDED: permalinkStatusIcon(status, 0, "A")
elif info[2]==MODIFIED: permalinkStatusIcon(status, 1, "M")
elif info[2]==REMOVED: permalinkStatusIcon(status, 2, "R")
else: sys.exit("cvslog2web: bad entry[PATH] status flag")
else:
if info[2]==ADDED: status.appendChild(doc.createTextNode("A"))
elif info[2]==MODIFIED: status.appendChild(doc.createTextNode("M"))
elif info[2]==REMOVED: status.appendChild(doc.createTextNode("R"))
else: sys.exit("cvslog2web: bad entry[PATH] status flag")
a=links.appendChild(doc.createElement("a"))
lid=dict(path=path,filename=os.path.basename(path),oldversion=vers2str(info[0]),newversion=vers2str(info[1]))
# Diff link needs both revisions; otherwise fall back to a view link.
if lid["oldversion"] and lid["newversion"]:
a.setAttribute("href",(DIFFLINKFORMAT % lid).replace(" ","%20"))
else:
a.setAttribute("href",(VIEWLINKFORMAT % lid).replace(" ","%20"))
a.appendChild(doc.createTextNode(dropModules(path)))
links.appendChild(doc.createElement("br"))
links.appendChild(doc.createTextNode("\n"))
if len(entry)>IMPORTEDFILES and len(entry)>IMPORTEDTAG: #just for backward compatibility
for f in entry[IMPORTEDFILES]:
status=links.appendChild(doc.createElement("span"))
status.setAttribute("class","cvslog2web_filestatus")
status.appendChild(doc.createTextNode(f[0]))
a=links.appendChild(doc.createElement("a"))
lid=dict(path=f[1],filename=os.path.basename(f[1]),oldversion="",newversion=entry[IMPORTEDTAG])
a.setAttribute("href",(VIEWLINKFORMAT % lid).replace(" ","%20"))
a.appendChild(doc.createTextNode(dropModules(f[1])))
links.appendChild(doc.createElement("br"))
links.appendChild(doc.createTextNode("\n"))
# Special content (user defined string?)
elif section.__class__=="".__class__:
if not section.strip().startswith("<"):
# doesn't start with a tag, we append as text
node.appendChild(doc.createTextNode(section))
else:
# Have to parse it so we can append it
# If we just appended it as text, all the good stuff would be escaped away
subdoc=xml.dom.minidom.parseString(section)
for n in subdoc.childNodes:
node.appendChild(subdoc.removeChild(n))
subdoc.unlink()
#put a return in the source after each section
node.appendChild(doc.createTextNode("\n"))
# Footer credit; for standalone pages it goes outside the permalink div.
if PERMALINK_STANDALONE:
gen=rootdiv.parentNode.appendChild(doc.createElement("div"))
else:
gen=rootdiv.appendChild(doc.createElement("div"))
gen.setAttribute("class","cvslog2web_credit")
gen.appendChild(doc.createTextNode("Generated by "))
a=appendTextualTag(gen,"a","cvslog2web")
a.setAttribute("target","_top")
a.setAttribute("href",SCRIPT_URL)
gen.appendChild(doc.createTextNode(SCRIPT_VERSION))
# Write the page, stripping the XML declaration when faking a fragment.
f=open(permafile,"wb")
if PERMALINK_STANDALONE or "createDocumentFragment" in dir(dom):
doc.writexml(f)
else:
# createDocumentFragment unavailable, hack it and strip the xml processing instruction
s=doc.toxml()
s=s[s.find("\n")+1:]
f.write(s)
f.close()
doc.unlink()
if VERBOSE:
print "Permalink generated:", genPermalink(entry[DATETIME])
def genPermalinkEntry(i):
# NOTE(review): this region appears corrupted as received.  "if i=MAXHISTORY:"
# is not valid Python (assignment in a condition) and was presumably a
# comparison such as "i>=MAXHISTORY" or "i==MAXHISTORY" before mangling; the
# dangling "else:" after the function body likewise suggests an enclosing
# "if" line was lost.  Code left byte-identical pending recovery of the
# original revision -- do not "fix" by guessing.
if i=MAXHISTORY:
pass #don't do last permalink if it's a rebuild -- would break link chain because we don't have previous
elif len(droppedHistory)==0:
i=len(history)-1
writePermalink(history[i],histFiles[i],histLinks[i],None,genPermalinkEntry(i-1))
else:
i=len(history)-1
# Oldest retained entry: its "previous" points at the newest dropped entry.
writePermalink(history[i],histFiles[i],histLinks[i],genPermalink(droppedHistory[-1][DATETIME]),genPermalinkEntry(i-1))
else:
writePermalink(history[0],histFiles[0],histLinks[0],genPermalinkEntry(1))
# Regenerate the newest entry and re-link its neighbor so the chain stays intact.
curpl=genPermalinkEntry(0)
if len(history)>1:
writePermalink(history[1],histFiles[1],histLinks[1],genPermalinkEntry(2),curpl)