Changesets can be listed by changeset number.
The Git repository is here.
- Revision:
- 142
- Log:
Initial import of cvslog2web, a(nother) CVS log analysis tool.
- Author:
- adh
- Date:
- Tue Nov 14 22:28:44 +0000 2006
- Size:
- 45311 Bytes
- Properties:
- Property svn:executable is set
#!/usr/bin/env python

# cvslog2web by Ethan Tira-Thompson
# Released under the GPL (http://www.gnu.org/copyleft/gpl.html)
# $Date: 2006/08/22 04:34:23 $
# Provides syndicated (Atom) and HTML output from CVS commit logs
# NOTE: written for Python 2 (print statements, execfile, dict.iteritems)
SCRIPT_REVISION="$Revision: 1.11 $" # CVS keyword; the numeric version is parsed out of this string below
SCRIPT_URL="http://ethan.tira-thompson.com/cvslog2web" # advertised in the Atom <generator> tag
10 | ################################################################################ |
11 | ################ INSTALLATION ################ |
12 | ################################################################################ |
13 | |
14 | # To install this script, copy it into the CVSROOT directory of your |
15 | # repository, and then add the following line to CVSROOT/loginfo: |
16 | # ALL python $CVSROOT/CVSROOT/cvslog2web.py $CVSROOT %{sVv} [config-file] |
17 | |
18 | # Don't forget you can replace 'ALL' with a filter to only apply |
19 | # cvslog2web to certain modules, or apply different copies of cvslog2web |
20 | # (presumably with different configuration settings) to different modules |
21 | |
22 | # The optional config file can hold the configuration parameters shown below |
23 | # This is convenient to use the same script with different settings for |
24 | # different modules (as opposed to copying the script itself) |
25 | |
26 | # You probably also received some stylesheet and images. The default |
27 | # placement for these files depends on the directory layout of your website: |
28 | # Atom.css -- same directory as the feed output (FEEDOUT) |
29 | # cvslog2web.css -- HTMLOUT_CSS and PERMALINK_CSS, default to same directory as |
30 | # the html page (HTMLOUT), and root of the permalink structure respectively |
31 | # nav_*.gif -- same directory as PERMALINK_CSS |
32 | # *.png -- Root of permalink structure (PERMALINK_URL_PREFIX) |
33 | |
34 | ################################################################################ |
35 | ################ CONFIGURATION ################ |
36 | ################################################################################ |
37 | |
import os,sys

# Given the command line suggested above:
# arg 0 will be the script path (unused)
# When run with fewer arguments (a manual "rebuild" invocation), root/args
# are instead filled in by the external-config section further below.
if len(sys.argv)>2:
    root=sys.argv[1] # arg 1 will be the repository root (CVSROOT)
    args=sys.argv[2].split(" ") # arg 2 is the directory of the commit followed by a list of files (space delimited)
# arg 3 is an optional configuration file parameter, which will override the defaults shown below
# arg 3 is handled at the end of the configuration section
47 | |
#### These first three settings control the destination of the output ####
#### Set to None or empty string ("") to disable that format's generation ####

# This is the directory to hold entries as individual html files
# If PERMALINKDIR is disabled, cannot generate links from the feed and HTML
# to permanent log entries (but that's perfectly legal to do)
# PERMALINKDIR should essentially be the root of your webserver, can use
# PERMALINK_STRUCTURE setting (below) to subdivide files into subdirectories
PERMALINKDIR="/Users/ejt/Sites/"

# where to direct the Atom feed output, relative paths are interpreted from PERMALINKDIR
FEEDOUT="cvs.xml"

# will hold the most recent MAXHISTORY entries, relative paths are interpreted from PERMALINKDIR
# This is intended as a quick list of recent entries, suitable for including into a larger page via SSI or frames
HTMLOUT="recent.html"


# Default sets the tmp directory to be created in the same location as the script
# This directory will hold status files between commits
# Be sure to think through moving this outside CVSROOT (all committers need access to the same files)
TMPDIR=os.path.join(os.path.dirname(sys.argv[0]),"cvslog2web-tmp")

# maximum amount of time (seconds) in which checkins with the same message will be grouped together
TIMEOUT=15*60

# number of checkins to retain
# this will be reset to max(MAXHISTORY,FEED_MAXHISTORY,HTMLOUT_MAXHISTORY)
MAXHISTORY=0

# domain to use for entry id tags and default email addresses
DOMAIN="example.org"

# viewcvs integration -- links from output to diffs
# format has 4 fields available: filename, path, oldversion, newversion
DIFFLINKFORMAT="http://www.example.org/cgi/viewcvs.cgi/%(path)s.diff?r1=%(oldversion)s&r2=%(newversion)s"

# For adds and removes, no diff is available, one of oldversion or newversion
# will be set (depending on add or remove), and the other will be an empty string
# Note how this default handles this by running them both together so it doesn't matter which is set
VIEWLINKFORMAT="http://www.example.org/cgi/viewcvs.cgi/%(path)s?rev=%(oldversion)s%(newversion)s&content-type=text/vnd.viewcvs-markup"

# entry titles can be a file list or the first line of the commit message
TITLE_FILE_LIST, TITLE_MESSAGE_FIRST_LINE=range(2)
ENTRYTITLE=TITLE_MESSAGE_FIRST_LINE

# if using message as title (TITLE_MESSAGE_FIRST_LINE), can either
# repeat that line again in the content, or just skip it
REPEAT_ALWAYS, REPEAT_WHEN_MULTIPLE, REPEAT_NEVER=range(3)
REPEAT_FIRST_LINE=REPEAT_WHEN_MULTIPLE

# contact list, user names are looked up here, if not found, will fall
# back to information obtained from the system (password database and DOMAIN)
CONTACT={
    "ejt":("Ethan Tira-Thompson","ejt@cs.cmu.edu"),
    "guest":("Guest",None) #use None or empty string ('') to hide email
}

# Can drop the module name from paths in the file list
# Useful when you only have one module in the repository, or are filtering
# the input being passed to cvslog2web from the loginfo file
# Value is the depth to drop, so 0 drops nothing, 1 drops the first directory, etc.
DROP_MODULE=0

# controls whether feedback is given as each type of output is produced
VERBOSE=True


################ ATOM feed customization ################
FEEDTITLE="cvslog2web CVS" # title for the feed
FEEDDESCRIPTION="" # a short description of the feed
FEEDHOMELINK="http://www.example.org/" # a link to the "main" page
FEEDLOGO="" # spec says this image should be twice as wide as it is tall
FEEDICON="" # spec says this should be square (as wide as tall)
FEEDENTRYPREFIX="Commit: " # prefix for entry titles in the feed output
# Controls generation of a list of <link rel="related"> tags for each file
# (not shown by many readers, and generally duplicates the file list in those that do.)
FEEDRELATED=False
FEED_MAXHISTORY=15

# FEEDSELFLINK *should* be set, but FEEDID *must* be set... if you don't
# provide FEEDSELFLINK, you will instead need to fill in FEEDID
# Further, the HTML index and permalink pages require the self link if you want them to contain links to the feed
# (why two separate settings? If you move the link, keep the link as the ID to maintain identity)
# These must be full, absolute URLs, not a relative path!
FEEDSELFLINK="http://www.example.org/cvs.xml" #self-link for the feed
FEEDID="" # a globally unique ID for the feed -- if empty, will be set to FEEDSELFLINK


################ HTML output customization ################
HTMLOUT_STANDALONE=True # whether to make the root element <html> to stand alone (vs. included via server-side include)
HTMLOUT_CSS="cvslog2web.css" # style sheet to include (only applies if HTMLOUT_STANDALONE is True)
HTMLTITLE="" # title for the html output, will fall back to FEEDTITLE if blank, use None to disable
HTMLOUT_ENTRYPREFIX="" # prefix for title lines; does *not* fall back on FEEDENTRYPREFIX
HTMLOUT_MESSAGE=True # set to False to skip the log body
HTMLOUT_FILELIST=True # set to False to skip the file list (also skipped if ENTRYTITLE==TITLE_FILE_LIST )
HTMLOUT_MAXHISTORY=10

# This will be used as the 'target' attribute for all links on the page.
# (Handy if using the HTML output within a frame on your site, and you want the
# diffs to load in another frame.) Ignored if empty, otherwise specify a frame
# name from your site or one of the keywords _self, _parent, _top, or _blank.
HTMLOUT_TARGET=""

################ permanent link pages customization ################
# Prefix for permalink URLs (structure/filename will be appended)
# Can be blank to use relative URLs from the feed/HTML pages
# Put any directory structure in PERMALINK_STRUCTURE, not here
# Only fill this in if you are specifying an absolute URL (i.e. starts with http://)
PERMALINK_URL_PREFIX=""
# strftime format string for permalink files -- can spread among subdirs with '/'
# In a post-processing stage, microseconds are available via '%%(us)d' (with normal printf-style formatting, e.g. %%(us)06d)
PERMALINK_STRUCTURE="commits/%Y/%m/commit-%d-%H-%M-%S-%%(us)06d.html"
# style sheet, will fall back to PERMALINK_URL_PREFIX+HTMLOUT_CSS if blank, use None to disable
PERMALINK_CSS=""
# text to use in the prev/next buttons
PL_PREVTEXT="PREV"
PL_NEXTTEXT="NEXT"
# Mostly for debugging, causes all permalink pages in history to be regenerated
# Default causes full rebuild if the script is run directly (without cvs-provided arguments)
REBUILDPERMALINKS=len(sys.argv)<3

# PERMALINK_ORDER allows you to define the order of items on permalink pages
PL_AUTHORS,PL_TIMESTAMP,PL_MESSAGE,PL_FILELIST,PL_PREVLINK,PL_NEXTLINK,PL_FEEDLINK=range(7)
PERMALINK_ORDER=[PL_TIMESTAMP,PL_PREVLINK,PL_NEXTLINK,PL_AUTHORS,PL_MESSAGE,PL_FILELIST,PL_FEEDLINK]
# But check this out: you can also include an xhtml string to be written at
# any point in the page! If the string begins with '<' it is parsed as xml.
# If it starts with any other character it is escaped as plain text. It is
# not possible to have incomplete tags which span built-in elements.
PERMALINK_ORDER.insert(0,"<h1><a href=\""+FEEDHOMELINK+"\">Visit Project Homepage</a></h1>")
178 | |
179 | ################ LOAD EXTERNAL ################ |
180 | # load overrides from optional external configuration file (if specified) |
181 | try: |
182 | config_file=None |
183 | if len(sys.argv)>3: |
184 | config_file=os.path.join(os.path.dirname(sys.argv[0]),sys.argv[3]) |
185 | if len(sys.argv)==2: # running outside loginfo, just regenerate |
186 | config_file=os.path.join(os.path.dirname(sys.argv[0]),sys.argv[1]) |
187 | root="" |
188 | args=[] |
189 | if config_file: |
190 | if os.path.isfile(config_file): |
191 | execfile(config_file) |
192 | else: |
193 | sys.exit("cvslog2web could not find configuration file "+config_file) |
194 | except: |
195 | import traceback |
196 | print "The following exception occurred while processing external configuration file:" |
197 | traceback.print_exc() |
198 | print "Execution will continue with default settings" |
199 | |
# Fill in the documented fallbacks now that any external overrides are loaded
if not FEEDID:
    FEEDID=FEEDSELFLINK
if HTMLTITLE=="":
    HTMLTITLE=FEEDTITLE
if PERMALINK_CSS=="":
    if PERMALINK_URL_PREFIX:
        PERMALINK_CSS=PERMALINK_URL_PREFIX+HTMLOUT_CSS
    else:
        # relative fallback: climb out of the permalink subdirectories ("../" per '/')
        PERMALINK_CSS="../"*PERMALINK_STRUCTURE.count("/")+HTMLOUT_CSS
# if these are already set to absolute paths, then these are no-ops
HTMLOUT=os.path.join(PERMALINKDIR,HTMLOUT)
FEEDOUT=os.path.join(PERMALINKDIR,FEEDOUT)


################################################################################
################ INPUT PARSING ################
################################################################################
# You don't want to change much below here...
# It's icky code from here on out.

import re, pickle, time, datetime, pwd
import xml.dom, xml.dom.minidom

MAXHISTORY=max(MAXHISTORY,FEED_MAXHISTORY,HTMLOUT_MAXHISTORY)

# single timestamp for this run; curtime_str is RFC-3339 style with microseconds
curtime=datetime.datetime.utcnow()
curtime_str=curtime.strftime("%Y-%m-%dT%H:%M:%S.%%06d+00:00") % curtime.microsecond

# ensure the status directory exists (refuse to clobber a plain file of the same name)
if os.path.exists(TMPDIR):
    if not os.path.isdir(TMPDIR): sys.exit("cvslog2web: file blocking TMPDIR "+TMPDIR)
else:
    os.makedirs(TMPDIR)

# verify CVSROOT
root=root.rstrip(os.sep) #strip any extra "/" at the end
if root and not os.path.isdir(root): sys.exit("cvslog2web: bad CVSROOT: "+root)

# Pull the cvslog2web script's version number out of the CVS keyword replacement
SCRIPT_VERSION=re.findall("[0-9.]+",SCRIPT_REVISION)
if len(SCRIPT_VERSION)==0:
    sys.exit("cvslog2web: invalid SCRIPT_REVISION setting (no version number) "+SCRIPT_REVISION)
elif len(SCRIPT_VERSION)>1:
    print "WARNING cvslog2web SCRIPT_REVISION contains multiple version strings?", SCRIPT_REVISION
SCRIPT_VERSION=SCRIPT_VERSION[0]
244 | |
# Determine the commit directory: on a real commit the first stdin line is
# "Update of <absolute path>"; a rebuild run has no stdin to read.
if len(sys.argv)<3:
    status=os.sep
else:
    status=sys.stdin.readline()[:-1]
    if not status.startswith("Update of "): sys.exit("cvslog2web: unrecognized cvs output")
    status=status[len("Update of "):]

if not status.startswith(root+os.sep): sys.exit("cvslog2web: commit outside repository?")
cidir=status[len(root)+1:] #don't leave intro '/' on cidir

# first word(s) of args form the directory, pop them off -- the directory may
# itself contain spaces, so keep joining words until the result matches cidir
cipop=""
while cipop!=cidir:
    if len(args)==0: sys.exit("cvslog2web: Unable to parse cvs output")
    cipop=os.path.join(cipop,args.pop(0))
del cipop

# test to see if this is the result of an import vs. regular commit
if len(sys.argv)>=3 and sys.argv[2].endswith(" - Imported sources"):
    # args is a LIST of words at this point, so strip the trailing marker
    # words ("-","Imported","sources").  BUG FIX: the original sliced by
    # len(" - Imported sources")==19, i.e. it dropped 19 list ELEMENTS
    # (the character count of the marker) instead of the 3 marker words.
    files=args[:-len(" - Imported sources".split())]
    imported=True
else:
    # args (from the command line) is a list of words forming a series of
    # "filename,oldvers,newvers" values; rejoin and parse with a regex.
    # This RE is smart enough to handle filenames with spaces or commas in them!
    files=re.findall("(.*?),([0-9.]+|NONE),([0-9.]+|NONE) "," ".join(args)+" ")
    imported=False
272 | |
# This function is used to convert a dotted decimal version string to a version list: "1.2.3" -> [1,2,3]
# This form is much more applicable to comparison -- lexicographic comparison of original version string will get it wrong
def version2list(s):
    """Convert a dotted version string to a list of ints; the CVS "NONE" marker becomes [].

    Uses a list comprehension instead of map() so the result is guaranteed to be
    a concrete list (map() only returns a list on Python 2); callers compare and
    pickle these values, so list semantics matter.
    """
    if s=="NONE":
        return []
    return [int(part) for part in s.split(".")]
# Now this next bit converts files into a dictionary mapping of
# names to [oldversion,newversion] lists, using the list form of version numbers
# (we're going to add on to the list of values for each file)
# (map() returns a list here under Python 2)
files=dict([(x[0], map(version2list,x[1:])) for x in files])
282 | |
# Enough of the command line arguments, now parse the stdin message
# We still need to get the status flag (add/modify/remove) for each file
# First we need to get the list of file names in each section
# Each of these sections at this point is just a space-delimited list of file names
#
# The loop below is a small state machine: 'section' points at whichever list
# the current line should be appended to, and the header lines switch it.
added=[]; modified=[]; removed=[]; message=[]; importedFiles=[]; importedTag=""
if len(sys.argv)>=3:
    line=sys.stdin.readline()
    section=[] #empty initial section
    while line:
        if section is message:
            # for imports, "Status:" ends the free-form message
            if imported and line=="Status:\n":
                message=["".join(message).strip()+"\n"]
                section=[]
            else:
                section.append(line)
        elif line=="Log Message:\n":
            section=message
        elif line=="Added Files:\n":
            section=added
        elif line=="Modified Files:\n":
            section=modified
        elif line=="Removed Files:\n":
            section=removed
        elif imported and (line.startswith("Vendor Tag:") or line.startswith("Release Tags:")):
            # imports: keep the tag lines in the message, then the file list follows
            message.append(line)
            section=importedFiles
            if line.startswith("Release Tags:"):
                importedTag=line[len("Release Tags:"):].strip()
        elif section is importedFiles:
            # import file lines look like "N path" -- a status letter and a name
            m=re.findall("([A-Z]) (.*)",line[:-1])
            if len(m)==1:
                importedFiles.append(m[0])
        else:
            section.append(line[1:-1]) #strip initial tab and trailing newline/linefeed
        line=sys.stdin.readline()
# flatten the sections to strings; on a rebuild run these were never filled,
# and "".join([]) harmlessly yields "" (processFiles below expects strings)
added="".join(added)
modified="".join(modified)
removed="".join(removed)
message="".join(message).strip()
importedFiles.sort(lambda x,y: cmp(x[1],y[1])) # sort (letter,name) tuples by name (Python 2 cmp-style sort)

# Don't do anything with new directories (everyone runs with 'update -dP' anyway right?)
# Directories don't matter until there's something in them
if len(sys.argv)>=3 and sys.argv[2].endswith(" - New directory"):
    # waited this long because CVS throws a hissy fit ("broken pipe...")
    #if you don't read the log message before quitting
    sys.exit()

# Constants for symbolic reference to a file's status within a commit
ADDED="add"
MODIFIED="mod"
REMOVED="del"
GHOST="ghost" # this comes up later... we won't get this directly in the input
336 | |
# We have the file names from the command line in 'files' (parsed above).
# Now we need to see which files are in which status section.
# A bit tricky because each space could be separating files
# or could be part of a filename itself.

# Spaces are such a pain, especially when they are being used
# as the delimiter, and your input doesn't escape the "real" spaces! Grrr.
# *** Still not perfect... if "foo" and "foo bar" are both
# involved, this could confuse it. ***
def processFiles(l,tag,out):
    # l: space-delimited file-name string for one section (added/modified/removed)
    # tag: the status constant (ADDED/MODIFIED/REMOVED) to record
    # out: dict to fill, mapping full path -> (oldvers,newvers,status) tuple
    # Reads module globals 'files' (name -> [oldv,newv]) and 'cidir'.
    partial=""
    for f in l.split(" "):
        # keep accumulating words until the accumulated name matches a known file
        f=partial+f
        if f in files:
            files[f].append(tag)
            out[os.path.join(cidir,f)]=tuple(files[f])
            partial=""
        else:
            partial=f+" "
    if partial.strip():
        print "WARNING: partial filename in", tag, "section: '"+partial.strip()+"'"
paths={}
processFiles(added,ADDED,paths)
processFiles(modified,MODIFIED,paths)
processFiles(removed,REMOVED,paths)
362 | |
363 | # now paths is a dictionary mapping full path to (oldv,newv,status) tuples |
364 | |
365 | # We've got our input regarding the current log entry, |
366 | # need to compare that against previous entry and see if it's |
367 | # all part of the same commit (since this script will be called |
368 | # separately for each directory involved in the commit, but |
369 | # we want them all to be associated in the same log entry) |
370 | |
371 | # load the last checkin's message and time, with a default value if file not found |
# load the last checkin's message and time, with a default value if file not found
def readfile(name,default):
    """Unpickle TMPDIR/name and return the value, or 'default' if the file can't be opened."""
    try:
        f=open(os.path.join(TMPDIR,name),"rb")
    except (IOError, OSError): #doesn't exist (or unreadable), that's ok
        return default
    try:
        return pickle.load(f)
    finally:
        # BUG FIX: the original leaked the file handle if pickle.load raised
        f.close()
379 | |
# lasttime is the datetime object from the last call to the script
# (default to one full TIMEOUT ago so a missing file never triggers a merge)
lasttime=readfile("lasttime",curtime-datetime.timedelta(0,TIMEOUT))

# history is everything we know about each entry, up to MAXHISTORY long
# The format is described in the next several lines
history=readfile("history",[])

# These enumerations define the basic fields in each entry in history
PATH,DATETIME,TIMESTAMP,AUTHORS,MESSAGE,IMPORTEDFILES,IMPORTEDTAG=range(7)

### History format ###
# History is a pretty major, and somewhat complex structure.
# It's basically just a list of entries, where each entry is a list indexed
# by the enumerations listed above, defining basic format the log entries
# PATH element holds the 'paths' variable (dictionary mapping paths to version numbers and status flag)
# DATETIME is the datetime object of the entry's initial commit
# TIMESTAMP is the last updated timestamp (formatted string)
# AUTHORS is a dict mapping username -> (name,email) tuple (both strings)
# MESSAGE is a string holding the log message read from stdin
# IMPORTEDFILES / IMPORTEDTAG hold the import file list and release tag (imports only)
######################

# get the user's name and username
# This might be a unix-only feature, not the end of the world if you
# have to rely on the contact list in the configuration section, or
# substitute another method
pw_db=pwd.getpwuid(os.getuid())
user=pw_db[0] # the "short" name, e.g. 'ejt'
user_name=pw_db[4] # the "full" name, e.g. 'Ethan Tira-Thompson'
408 | |
# If within the TIMEOUT and have the same message, merge
# the current file list with the first entry of the history
# Otherwise, add a new entry (popping old entries if len(history)>MAXHISTORY...)
droppedHistory=[] # stores popped entries, reused when rebuilding permalinks
if len(sys.argv)<3:
    pass # rebuild shouldn't change any history entries
elif curtime-lasttime<datetime.timedelta(0,TIMEOUT) and len(history)>0 and history[0][MESSAGE]==message and not imported:
    # merge is a little interesting:
    # If the file is added and then modified with the same log message,
    # merge as still added, just with the later revision number
    # Removed and re-added is modified
    # Similarly, modified then removed is just removed
    # However, added then removed is as if never existed, but still need
    # to store file info, (in case of re-add) hence the "ghost" status
    def merge(v1,v2):
        # v1/v2 are (oldvers,newvers,status) tuples for the same path
        ov=min(v1[0],v2[0]) # keep the earliest old version
        nv=max(v1[1],v2[1]) # ...and the latest new version
        if v1[2]==v2[2]:
            t=v1[2]
        elif v1[2]==ADDED and v2[2]==REMOVED:
            t=GHOST
        elif v1[2]==REMOVED and v2[2]==ADDED:
            t=MODIFIED
        elif v1[2]==ADDED or v2[2]==ADDED:
            t=ADDED
        elif v1[2]==REMOVED or v2[2]==REMOVED:
            t=REMOVED
        else:
            t=GHOST
        return (ov,nv,t)

    for k,v in paths.iteritems():
        if history[0][PATH].setdefault(k,v)!=v:
            history[0][PATH][k]=merge(history[0][PATH][k],v)
    history[0][TIMESTAMP]=curtime_str
    history[0][AUTHORS][user]=CONTACT.get(user,(user_name,user+"@"+DOMAIN))

else: # push paths as a new entry on its own
    # BUG FIX: the fallback here previously used the short username as the
    # display name -- (user, ...) -- while the merge branch above uses the
    # full name (user_name); use the full name consistently in both places
    authors={user:CONTACT.get(user,(user_name,user+"@"+DOMAIN))}
    history.insert(0,[paths,curtime,curtime_str,authors,message,importedFiles,importedTag])
    while len(history)>MAXHISTORY: droppedHistory.append(history.pop())
450 | |
451 | |
################################################################################
################ STATUS STORAGE ################
################################################################################
# We want to write out the history as soon as possible to reduce the risk
# of a processing error causing us to drop or corrupt an entry. At least
# once it's stored in the file, if there's an error I can debug it and then
# we can regenerate the output.
if len(sys.argv)>=3: # don't touch files if it's a rebuild
    f=open(os.path.join(TMPDIR,"lasttime"),"wb")
    pickle.dump(curtime,f)
    f.close()
    f=open(os.path.join(TMPDIR,"history"),"wb")
    pickle.dump(history,f)
    f.close()
466 | |
467 | |
468 | ################################################################################ |
469 | ################ FUNCTION DECLARATIONS ################ |
470 | ################################################################################ |
471 | |
# shorthand for adding a subnode with a particular name and textual content
def appendTextualTag(node,name,str):
    """Append <name>str</name> under node (plus a trailing newline text node); return the new element."""
    child=doc.createElement(name)
    node.appendChild(child)
    child.appendChild(doc.createTextNode(str))
    node.appendChild(doc.createTextNode("\n"))
    return child
478 | |
# quick-n-dirty version of set(), when used with reduce()
def collect(x,y):
    """Fold helper: append y to list x only if absent (order-preserving de-dup); returns x."""
    if y in x:
        return x
    x.append(y)
    return x
483 | |
# removes first DROP_MODULE directory names from path
def dropModules(path):
    """Strip the leading DROP_MODULE directory components from path; "" if there are too few."""
    remaining=path
    for _ in range(DROP_MODULE):
        cut=remaining.find(os.sep)
        if cut<0:
            return ""
        remaining=remaining[cut+1:]
    return remaining
492 | |
# Converts a list of file names to a more compact form, grouping
# those in directories together:
# /foo/bar and /foo/baz become /foo/{bar,baz}
def makeStr(names):
    # unique directory names, deepest first so files match their most specific directory
    dirs=reduce(collect,[os.path.dirname(x) for x in names],[])
    dirs.sort(lambda x,y:-cmp(len(x),len(y))) #go from longest (deepest) to shortest (shallowest)
    common={} # maps (module-stripped) directory -> list of member file names
    remain=names[:] # copy: entries are removed as they are claimed by a directory
    for d in dirs:
        paths=filter(lambda x: x.startswith(d),remain)
        paths.sort()
        if len(paths)>1: # only group when a directory has multiple members
            for p in paths:
                common.setdefault(dropModules(d),[]).append(p[len(d)+1:])
                remain.remove(p)
    # whatever is left are singletons; keep them under their own directory
    for p in remain:
        common.setdefault(dropModules(os.path.dirname(p)),[]).append(dropModules(p))
    ks=common.keys()
    ks.sort()
    strs=[]
    for k in ks:
        v=common[k]
        if len(v)>1:
            if k:
                strs.append(os.path.join(k,"{"+(",".join(v))+"}"))
            else:
                strs.append(", ".join(v)) # no common directory, just list them
        else:
            strs.append(v[0])
    return ", ".join(strs)
523 | |
524 | # This older version is a bit more simplistic, but doesn't |
525 | # handle singleton paths as nicely |
526 | #def makeStr(names): |
527 | # dirs=reduce(collect,[os.path.dirname(x) for x in names],[]) |
528 | # strs=[] |
529 | # for d in dirs: |
530 | # files=map(os.path.basename,filter(lambda x: x.startswith(d),names)) |
531 | # if len(files)==1: |
532 | # strs.append(os.path.join(d,files)) |
533 | # else: |
534 | # strs.append(os.path.join(d,"{"+",".join(files)+"}")) |
535 | # return " ".join(strs) |
536 | |
# convert version list to string representation
# [major, minor, patch, ...] -> major.minor.patch....
def vers2str(v):
    """Render a version list such as [1,2,3] back into dotted form "1.2.3" ("" for [])."""
    return ".".join(str(part) for part in v)
541 | |
542 | |
# this part computes titles and links for each entry in the history
# histFiles[i] is a human-readable "Added ...; Modified ...; Removed ..." summary for history[i]
histFiles=[]
for entry in history:
    # generate title string, one compacted group per status
    addstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==ADDED])
    modstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==MODIFIED])
    remstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==REMOVED])
    files=[]
    if len(addstr)>0:
        files.append("Added "+addstr)
    if len(modstr)>0:
        files.append("Modified "+modstr)
    if len(remstr)>0:
        files.append("Removed "+remstr)
    files="; ".join(files)
    histFiles.append(files)
559 | |
# Links are a bit tricky; this will generate a dictionary of
# "common paths", where each key is the path, value is a list of
# files (may include singleton paths) within that path
# NOTE(review): this reads 'entry' from the enclosing for-loops below (it is
# not a parameter), and it mutates the list passed in (via remain=paths);
# callers always pass freshly built lists, so the mutation is contained.
def makeLinks(paths):
    dirs=reduce(collect,[os.path.dirname(x) for x in paths],[])
    dirs.sort(lambda x,y:-cmp(len(x),len(y))) #go from longest (deepest) to shortest (shallowest)
    links={} # (module-stripped) dir -> list of dicts ready for DIFFLINKFORMAT/VIEWLINKFORMAT
    remain=paths # alias, entries removed as claimed (mutates the argument)
    for d in dirs:
        paths=filter(lambda x: x.startswith(d),remain)
        if len(paths)>1:
            paths.sort()
            for p in paths:
                info=entry[PATH][p]
                links.setdefault(dropModules(d),[]).append(dict(path=p,filename=p[len(d)+1:],oldversion=vers2str(info[0]),newversion=vers2str(info[1])))
                remain.remove(p)
    # leftovers are singletons, keyed under their own directory
    for p in remain:
        info=entry[PATH][p]
        links.setdefault(dropModules(os.path.dirname(p)),[]).append(dict(path=p,filename=dropModules(p),oldversion=vers2str(info[0]),newversion=vers2str(info[1])))
    return links
580 | |
# histLinks[i] maps a section label ("Added: ", etc.) to the link groups for history[i]
histLinks=[]
if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE:
    # separate link groups per status, since the file list is shown in the body
    for entry in history:
        addlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==ADDED])
        modlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==MODIFIED])
        remlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==REMOVED])
        links={}
        if len(addlinks)>0:
            links["Added: "]=addlinks
        if len(modlinks)>0:
            links["Modified: "]=modlinks
        if len(remlinks)>0:
            links["Removed: "]=remlinks
        histLinks.append(links)
else:
    # file-list titles already show the status breakdown, so just one flat group
    for entry in history:
        histLinks.append({"Diff: ": makeLinks(entry[PATH].keys())})
598 | |
def genPermalink(t):
    """Return the permalink URL for an entry whose initial commit time is the datetime t."""
    relpath=t.strftime(PERMALINK_STRUCTURE) % {"us":t.microsecond}
    return PERMALINK_URL_PREFIX+relpath
602 | |
def genPermafile(t):
    """Return the filesystem path of the permalink page for the datetime t."""
    relpath=t.strftime(PERMALINK_STRUCTURE) % {"us":t.microsecond}
    return os.path.join(PERMALINKDIR,relpath.replace("/",os.sep))
606 | |
607 | |
608 | ################################################################################ |
609 | ################ RSS (ATOM) OUTPUT ################ |
610 | ################################################################################ |
611 | |
# ---- Atom feed output ----
# Builds an Atom 1.0 document (one <entry> per commit in 'history') and
# writes it to FEEDOUT.  All-caps names (FEEDOUT, FEEDID, ...) are
# module-level configuration settings loaded earlier in the script.
if FEEDOUT:
    dom=xml.dom.getDOMImplementation()
    doc=dom.createDocument("http://www.w3.org/2005/Atom","feed",None)
    feed=doc.documentElement
    # xml-stylesheet PI so browsers render the raw feed with Atom.css
    doc.insertBefore(doc.createProcessingInstruction("xml-stylesheet",'href="Atom.css" type="text/css"'),feed)
    feed.setAttribute("xmlns","http://www.w3.org/2005/Atom")
    feed.appendChild(doc.createTextNode("\n"))

    # feed header tags (id/title/links/logo/icon/subtitle/updated/generator)
    appendTextualTag(feed,"id",FEEDID)
    appendTextualTag(feed,"title",FEEDTITLE)
    if FEEDHOMELINK:
        linknode=feed.appendChild(doc.createElement("link"))
        linknode.setAttribute("rel","alternate")
        linknode.setAttribute("href",FEEDHOMELINK)
        feed.appendChild(doc.createTextNode("\n"))
    if FEEDSELFLINK:
        linknode=feed.appendChild(doc.createElement("link"))
        linknode.setAttribute("rel","self")
        linknode.setAttribute("href",FEEDSELFLINK)
        feed.appendChild(doc.createTextNode("\n"))
    if FEEDLOGO:
        appendTextualTag(feed,"logo",FEEDLOGO)
    if FEEDICON:
        appendTextualTag(feed,"icon",FEEDICON)
    if FEEDDESCRIPTION:
        appendTextualTag(feed,"subtitle",FEEDDESCRIPTION)
    appendTextualTag(feed,"updated",curtime_str)
    generator=feed.appendChild(doc.createElement("generator"))
    generator.setAttribute("uri",SCRIPT_URL)
    generator.setAttribute("version",SCRIPT_VERSION)
    generator.appendChild(doc.createTextNode("cvslog2web"))
    feed.appendChild(doc.createTextNode("\n"))

    # One <entry> per retained commit, capped at FEED_MAXHISTORY
    # (history[0] is the newest entry -- see the permalink driver below)
    for (entry,files,links) in zip(history,histFiles,histLinks)[:FEED_MAXHISTORY]:
        msg=entry[MESSAGE].splitlines()

        # entry header tags
        node=feed.appendChild(doc.createElement("entry"))
        node.appendChild(doc.createTextNode("\n"))
        t=entry[DATETIME]
        # Stable tag: URI built from DOMAIN, commit date, second-of-day and
        # microseconds (the %% escapes survive strftime, then the % operator
        # substitutes the runtime values)
        appendTextualTag(node,"id",entry[DATETIME].strftime("tag:%%s,%Y-%m-%d:%%05d.%%06d")%(DOMAIN,t.hour*60*60+t.minute*60+t.second,t.microsecond))
        if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0:
            appendTextualTag(node,"title",FEEDENTRYPREFIX+msg[0])
        elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0:
            # fall back to the file list when the commit message is empty
            appendTextualTag(node,"title",FEEDENTRYPREFIX+files)
        else:
            sys.exit("cvslog2web: bad ENTRYTITLE setting")
        appendTextualTag(node,"updated",entry[TIMESTAMP])
        for n in entry[AUTHORS].itervalues():
            # n is a (name, email) pair; the email element is optional
            author=node.appendChild(doc.createElement("author"))
            author.appendChild(doc.createElement("name")).appendChild(doc.createTextNode(n[0]))
            if len(n)>1 and n[1]!="":
                author.appendChild(doc.createElement("email")).appendChild(doc.createTextNode(n[1]))
            node.appendChild(doc.createTextNode("\n"))
        if PERMALINKDIR:
            # rel=alternate -> this entry's standalone permalink page
            linknode=node.appendChild(doc.createElement("link"))
            linknode.setAttribute("rel","alternate")
            linknode.setAttribute("href",genPermalink(entry[DATETIME]))
            linknode.setAttribute("type","text/html")
        if FEEDRELATED:
            # rel=related -> per-file diff/view pages in the repository browser
            for ll in links.itervalues():
                for group in ll.itervalues():
                    for l in group:
                        linknode=node.appendChild(doc.createElement("link"))
                        linknode.setAttribute("rel","related")
                        linknode.setAttribute("title",l["path"])
                        # a diff needs both versions; adds/removes get a view link
                        if l["oldversion"] and l["newversion"]:
                            linknode.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
                        else:
                            linknode.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
                        linknode.setAttribute("type","text/html")

        # CONTENT section -- inline XHTML copy of the commit message
        content=node.appendChild(doc.createElement("content"))
        content.setAttribute("type","xhtml")
        content=content.appendChild(doc.createElement("div"))
        content.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
        content.appendChild(doc.createTextNode("\n"))
        for i,m in enumerate(msg):
            # skip the first line when it already appears as the entry title
            if i==0 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE and len(msg)==1):
                continue
            appendTextualTag(content,"div",m).setAttribute("style","padding:.25em 0;")
        # End of message embedding

        #now add links to content
        filelist=content.appendChild(doc.createElement("div"))
        if len(msg)>1:
            filelist.setAttribute("style","padding:.6em 0;")
        for pre,ll in links.iteritems():
            # one div per status prefix; files sharing a directory are
            # rendered grouped as "dir/{a, b}"
            diffs=filelist.appendChild(doc.createElement("div"))
            diffs.setAttribute("style","padding:.25em 0;")
            diffs.appendChild(doc.createTextNode(pre))
            diffs=diffs.appendChild(doc.createElement("tt"))
            firstSet=True
            for k,group in ll.iteritems():
                if not firstSet:
                    diffs.appendChild(doc.createTextNode(", "))
                if len(group)>1 and k:
                    diffs.appendChild(doc.createTextNode(os.path.join(k,"{")))
                firstLink=True
                for l in group:
                    if not firstLink:
                        diffs.appendChild(doc.createTextNode(", "))
                    a=diffs.appendChild(doc.createElement("a"))
                    if l["oldversion"] and l["newversion"]:
                        a.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
                    else:
                        a.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
                    a.appendChild(doc.createTextNode(l["filename"]))
                    firstLink=False
                if len(group)>1 and k:
                    diffs.appendChild(doc.createTextNode("}"))
                firstSet=False
            filelist.appendChild(doc.createTextNode("\n"))

        #end of content
        node.appendChild(doc.createTextNode("\n"))

    f=open(FEEDOUT,"wb")
    doc.writexml(f)
    f.close()
    doc.unlink()
    if VERBOSE:
        print "Feed update successful"
738 | |
739 | ################################################################################ |
740 | ################ HTML OUTPUT ################ |
741 | ################################################################################ |
742 | |
743 | # some functions for HTML output, shared by HTMLOUT and PERMALINK output |
def HTMLHeader(dom,title,css,rootclass):
    """Build an XHTML 1.0 Transitional document skeleton.

    dom       -- DOM implementation used to create the document
    title     -- text for the <title> element
    css       -- stylesheet href, or a false value to omit the <link>
    rootclass -- CSS class for the content container div

    Adds an Atom autodiscovery <link> when both FEEDOUT and FEEDSELFLINK
    are configured.  Returns (document, container_div); callers append
    their content to the returned div.
    """
    dtd=dom.createDocumentType("html","//W3C//DTD XHTML 1.0 Transitional//EN","http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd")
    document=dom.createDocument("http://www.w3.org/1999/xhtml","html",dtd)
    root=document.documentElement
    root.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
    root.appendChild(document.createTextNode("\n"))
    head=root.appendChild(document.createElement("head"))
    appendTextualTag(head,"title",title)
    if css:
        # optional stylesheet reference
        sheet=head.appendChild(document.createElement("link"))
        sheet.setAttribute("rel","stylesheet")
        sheet.setAttribute("type","text/css")
        sheet.setAttribute("href",css)
        head.appendChild(document.createTextNode("\n"))
    if FEEDOUT and FEEDSELFLINK:
        # Atom feed autodiscovery link
        alt=head.appendChild(document.createElement("link"))
        alt.setAttribute("rel","alternate")
        alt.setAttribute("title","Atom Syndication Feed")
        alt.setAttribute("type","application/atom+xml")
        alt.setAttribute("href",FEEDSELFLINK)
        head.appendChild(document.createTextNode("\n"))
    body=root.appendChild(document.createElement("body"))
    body.appendChild(document.createTextNode("\n"))
    container=body.appendChild(document.createElement("div"))
    body.appendChild(document.createTextNode("\n"))
    container.appendChild(document.createTextNode("\n"))
    container.setAttribute("class",rootclass)
    return (document,container)
772 | |
def appendLinks(filelist,pre,ll):
    """Append a "<status> file, file, ..." paragraph of repository links.

    filelist -- DOM element (already attached to the output document) that
                receives a new <p> child
    pre      -- status prefix text, styled with the cvslog2web_filestatus class
    ll       -- dict mapping a common directory prefix to a list of link-info
                dicts with keys path/filename/oldversion/newversion
    """
    # Derive the owning document from the node we were handed instead of
    # reading the module-level 'doc' global set up by the HTMLOUT section;
    # output is identical, but the helper no longer depends on a hidden
    # global having been initialized first.
    doc=filelist.ownerDocument
    filelist=filelist.appendChild(doc.createElement("p"))
    appendTextualTag(filelist,"span",pre).setAttribute("class","cvslog2web_filestatus")
    firstSet=True
    for k,v in ll.iteritems():
        if not firstSet:
            filelist.appendChild(doc.createTextNode(", "))
        # files sharing a directory prefix are grouped as "dir/{a, b}"
        if len(v)>1 and k:
            filelist.appendChild(doc.createTextNode(os.path.join(k,"{")))
        firstLink=True
        for l in v:
            if not firstLink:
                filelist.appendChild(doc.createTextNode(", "))
            a=filelist.appendChild(doc.createElement("a"))
            if HTMLOUT_TARGET:
                a.setAttribute("target",HTMLOUT_TARGET)
            # diff link needs both versions; adds/removes get a view link.
            # Spaces are %-escaped so the URL stays valid.
            if l["oldversion"] and l["newversion"]:
                a.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
            else:
                a.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
            a.appendChild(doc.createTextNode(l["filename"]))
            firstLink=False
        if len(v)>1 and k:
            filelist.appendChild(doc.createTextNode("}"))
        firstSet=False
798 | |
799 | |
800 | ################ HTML STANDALONE OUTPUT ################ |
801 | |
802 | if HTMLOUT: |
803 | dom=xml.dom.getDOMImplementation() |
804 | if not HTMLOUT_STANDALONE: |
805 | if "createDocumentFragment" in dir(dom): |
806 | doc=dom.createDocumentFragment() |
807 | else: |
808 | doc=dom.createDocument(None,None,None) |
809 | rootdiv=doc.appendChild(doc.createElement("div")) |
810 | rootdiv.setAttribute("xmlns","http://www.w3.org/1999/xhtml") |
811 | rootdiv.appendChild(doc.createTextNode("\n")) |
812 | rootdiv.setAttribute("class","cvslog2web_index") |
813 | else: |
814 | doc,rootdiv=HTMLHeader(dom,HTMLTITLE,HTMLOUT_CSS,"cvslog2web_index") |
815 | |
816 | for (entry,files,links) in zip(history,histFiles,histLinks)[:HTMLOUT_MAXHISTORY]: |
817 | msg=entry[MESSAGE].splitlines() |
818 | |
819 | # entry header tags |
820 | node=rootdiv.appendChild(doc.createElement("div")) |
821 | node.setAttribute("class","cvslog2web_entry") |
822 | node.appendChild(doc.createTextNode("\n")) |
823 | titlenode=node.appendChild(doc.createElement("div")) |
824 | titlenode.setAttribute("class","cvslog2web_title") |
825 | if PERMALINKDIR: |
826 | a=appendTextualTag(titlenode,"a",HTMLOUT_ENTRYPREFIX) |
827 | if HTMLOUT_TARGET: |
828 | a.setAttribute("target",HTMLOUT_TARGET) |
829 | a.setAttribute("href",genPermalink(entry[DATETIME])) |
830 | if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0: appendTextualTag(a,"span",msg[0]).setAttribute("class","cvslog2web_message") |
831 | elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0: appendTextualTag(a,"span",files).setAttribute("class","cvslog2web_filelist") |
832 | else: sys.exit("cvslog2web: bad ENTRYTITLE setting") |
833 | else: |
834 | titlenode.appendChild(doc.createTextNode(HTMLOUT_ENTRYPREFIX)) |
835 | if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE: appendTextualTag(titlenode,"span",msg[0]).setAttribute("class","cvslog2web_message") |
836 | elif ENTRYTITLE==TITLE_FILE_LIST: appendTextualTag(titlenode,"span",files).setAttribute("class","cvslog2web_filelist") |
837 | else: sys.exit("cvslog2web: bad ENTRYTITLE setting") |
838 | titlenode.appendChild(doc.createTextNode(" ")) |
839 | authors=titlenode.appendChild(doc.createElement("span")) |
840 | authors.setAttribute("class","cvslog2web_authors") |
841 | authors.appendChild(doc.createTextNode("("+",".join([k for k,n in entry[AUTHORS].iteritems()])+")")) |
842 | |
843 | # Content section |
844 | if HTMLOUT_MESSAGE: |
845 | content=node.appendChild(doc.createElement("div")) |
846 | content.setAttribute("class","cvslog2web_message") |
847 | content.appendChild(doc.createTextNode("\n")) |
848 | for i,m in enumerate(msg): |
849 | if i==0 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE and len(msg)==1): |
850 | continue |
851 | appendTextualTag(content,"p",m) |
852 | if len(msg)==1 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE): |
853 | node.removeChild(content) |
854 | |
855 | if len(links)>0 and HTMLOUT_FILELIST and ENTRYTITLE!=TITLE_FILE_LIST: |
856 | #now add links to content |
857 | filelist=node.appendChild(doc.createElement("div")) |
858 | filelist.setAttribute("class","cvslog2web_filelist") |
859 | filelist.appendChild(doc.createTextNode("\n")) |
860 | for pre,l in links.iteritems(): |
861 | appendLinks(filelist,pre,l) |
862 | |
863 | #end of content |
864 | node.appendChild(doc.createTextNode("\n")) |
865 | |
866 | gen=rootdiv.appendChild(doc.createElement("div")) |
867 | gen.setAttribute("class","cvslog2web_credit") |
868 | gen.appendChild(doc.createTextNode("Generated by ")) |
869 | a=appendTextualTag(gen,"a","cvslog2web") |
870 | a.setAttribute("target","_top") |
871 | a.setAttribute("href",SCRIPT_URL) |
872 | gen.appendChild(doc.createTextNode(SCRIPT_VERSION)) |
873 | |
874 | f=open(HTMLOUT,"wb") |
875 | if HTMLOUT_STANDALONE or "createDocumentFragment" in dir(dom): |
876 | doc.writexml(f) |
877 | else: |
878 | # createDocumentFragment unavailable, hack it and strip the xml processing instruction |
879 | s=doc.toxml() |
880 | s=s[s.find("\n")+1:] |
881 | f.write(s) |
882 | f.close() |
883 | doc.unlink() |
884 | if VERBOSE: |
885 | print "HTML update successful" |
886 | |
887 | |
888 | ################ HTML PERMALINK OUTPUT ################ |
889 | |
def normalizeLink(link):
    """Adjust a permalink-relative URL for the current page depth.

    When an absolute PERMALINK_URL_PREFIX is configured the link is usable
    as-is; otherwise links between permalink pages are relative, so climb
    one "../" per directory level in PERMALINK_STRUCTURE.
    """
    if PERMALINK_URL_PREFIX:
        return link
    return "../"*PERMALINK_STRUCTURE.count("/")+link
894 | |
def writePermalink(entry,files,links,prevLink="",nextLink=""):
    """Write one standalone permalink HTML page for a commit entry.

    entry    -- history record (indexed by DATETIME/MESSAGE/AUTHORS/PATH/...)
    files    -- pre-rendered file-list string, used as a fallback title
    links    -- status-prefix -> grouped link-info dicts for the file list
    prevLink -- URL of the chronologically previous entry's page ("" = none)
    nextLink -- URL of the next (newer) entry's page ("" = none)

    Sections are emitted in the order given by PERMALINK_ORDER; string
    members of that list are injected verbatim (parsed as XML if they look
    like markup).

    NOTE(review): relies on the module-level 'dom' created by the FEEDOUT or
    HTMLOUT sections above; if both of those outputs are disabled while
    PERMALINKDIR is set, 'dom' would be undefined here -- confirm which
    configurations are supported.
    """
    if prevLink: prevLink=normalizeLink(prevLink)
    if nextLink: nextLink=normalizeLink(nextLink)

    permalink=genPermalink(entry[DATETIME])
    permafile=genPermafile(entry[DATETIME])
    permdir=os.path.dirname(permafile)
    # create the dated output directory, refusing to clobber a plain file
    if os.path.exists(permdir):
        if not os.path.isdir(permdir): sys.exit("cvslog2web: file blocking PERMALINKDIR "+permdir)
    else:
        os.makedirs(permdir)

    msg=entry[MESSAGE].splitlines()

    # entry header tags -- page title follows the same rules as feed entry titles
    if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0:
        title=FEEDENTRYPREFIX+msg[0]
    elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0:
        title=FEEDENTRYPREFIX+files
    else:
        sys.exit("cvslog2web: bad ENTRYTITLE setting")
    doc,rootdiv=HTMLHeader(dom,title,PERMALINK_CSS,"cvslog2web_permalink")
    node=rootdiv.appendChild(doc.createElement("div"))
    node.setAttribute("class","cvslog2web_entry")
    node.appendChild(doc.createTextNode("\n"))

    # emit each configured section in order
    for section in PERMALINK_ORDER:
        # Previous link (disabled style when there is no older entry)
        if section==PL_PREVLINK:
            n=node.appendChild(doc.createElement("div"))
            if prevLink:
                n.setAttribute("class","cvslog2web_nav_prev")
                # whole div is clickable, not just the anchor text
                n.setAttribute("onClick","window.location.href='"+prevLink+"'")
                a=n.appendChild(doc.createElement("a"))
                a.setAttribute("href",prevLink)
                a.appendChild(doc.createTextNode(PL_PREVTEXT))
            else:
                n.setAttribute("class","cvslog2web_nav_prev_disabled")
                n.appendChild(doc.createTextNode(PL_PREVTEXT))

        # Next link (disabled style when there is no newer entry)
        elif section==PL_NEXTLINK:
            n=node.appendChild(doc.createElement("div"))
            if nextLink:
                n.setAttribute("class","cvslog2web_nav_next")
                n.setAttribute("onClick","window.location.href='"+nextLink+"'")
                a=n.appendChild(doc.createElement("a"))
                a.setAttribute("href",nextLink)
                a.appendChild(doc.createTextNode(PL_NEXTTEXT))
            else:
                n.setAttribute("class","cvslog2web_nav_next_disabled")
                n.appendChild(doc.createTextNode(PL_NEXTTEXT))

        # Feed link (badge images linking to the Atom feed)
        elif section==PL_FEEDLINK and FEEDOUT and FEEDSELFLINK:
            n=node.appendChild(doc.createElement("div"))
            n.setAttribute("class","cvslog2web_feedlink")
            a=n.appendChild(doc.createElement("a"))
            a.setAttribute("href",FEEDSELFLINK)
            i=a.appendChild(doc.createElement("img"))
            i.setAttribute("src",normalizeLink(PERMALINK_URL_PREFIX+"atom_feed.png"))
            i.setAttribute("width","84")
            i.setAttribute("height","15")
            a.appendChild(doc.createTextNode(" "))
            i=a.appendChild(doc.createElement("img"))
            i.setAttribute("src",normalizeLink(PERMALINK_URL_PREFIX+"feed_icon.png"))
            i.setAttribute("width","15")
            i.setAttribute("height","15")

        # Title (Timestamp) -- convert the stored time to local time by hand
        elif section==PL_TIMESTAMP:
            # NOTE(review): picks DST vs standard offset from time.daylight
            # globally, not per-date; commits from the other half of the year
            # may be labelled with the wrong offset -- confirm acceptable.
            if time.daylight:
                t=entry[DATETIME]-datetime.timedelta(seconds=time.altzone)
                tz=time.tzname[1]
            else:
                t=entry[DATETIME]-datetime.timedelta(seconds=time.timezone)
                tz=time.tzname[0]
            # NOTE(review): "Commited" is misspelled in the user-visible
            # output string; left untouched here since it is runtime text.
            title=t.strftime("Commited %a. %B %d, %Y at %I:%M:%S %p "+tz)
            appendTextualTag(node,"div",title).setAttribute("class","cvslog2web_timestamp")

        # Authors -- comma-separated mailto links
        elif section==PL_AUTHORS:
            authors=node.appendChild(doc.createElement("div"))
            authors.setAttribute("class","cvslog2web_authors")
            authors.appendChild(doc.createTextNode("\nfrom "))
            first=True
            for n in entry[AUTHORS].itervalues():
                if first: first=False
                else: authors.appendChild(doc.createTextNode(", "))
                # n is (name, email)
                l=appendTextualTag(authors,"a",n[0])
                # NOTE(review): unlike the feed section, no guard for an empty
                # email -- produces "mailto:" with no address; confirm intended.
                l.setAttribute("href","mailto:"+n[1])

        # Message -- full commit message, one line per <br>
        elif section==PL_MESSAGE:
            content=node.appendChild(doc.createElement("div"))
            content.setAttribute("class","cvslog2web_message")
            for i,m in enumerate(msg):
                content.appendChild(doc.createTextNode("\n"+m))
                content.appendChild(doc.createElement("br"))
            content.appendChild(doc.createTextNode("\n"))

        # Links -- per-file status flag plus diff/view anchor
        elif section==PL_FILELIST:
            links=node.appendChild(doc.createElement("div"))
            links.setAttribute("class","cvslog2web_filelist")
            links.appendChild(doc.createTextNode("\n"))
            spaths=entry[PATH].keys()
            spaths.sort()
            for path in spaths:
                # info is (oldversion, newversion, status-flag)
                info=entry[PATH][path]
                status=links.appendChild(doc.createElement("span"))
                status.setAttribute("class","cvslog2web_filestatus")
                if info[2]==ADDED: status.appendChild(doc.createTextNode("A"))
                elif info[2]==MODIFIED: status.appendChild(doc.createTextNode("M"))
                elif info[2]==REMOVED: status.appendChild(doc.createTextNode("R"))
                else: sys.exit("cvslog2web: bad entry[PATH] status flag")
                a=links.appendChild(doc.createElement("a"))
                lid=dict(path=path,filename=os.path.basename(path),oldversion=vers2str(info[0]),newversion=vers2str(info[1]))
                # diff link needs both versions; adds/removes get a view link
                if lid["oldversion"] and lid["newversion"]:
                    a.setAttribute("href",(DIFFLINKFORMAT % lid).replace(" ","%20"))
                else:
                    a.setAttribute("href",(VIEWLINKFORMAT % lid).replace(" ","%20"))
                a.appendChild(doc.createTextNode(dropModules(path)))
                links.appendChild(doc.createElement("br"))
                links.appendChild(doc.createTextNode("\n"))
            if len(entry)>IMPORTEDFILES and len(entry)>IMPORTEDTAG: #just for backward compatability
                for f in entry[IMPORTEDFILES]:
                    # f is (status-letter, path); imported files only have a
                    # view link against the import tag
                    status=links.appendChild(doc.createElement("span"))
                    status.setAttribute("class","cvslog2web_filestatus")
                    status.appendChild(doc.createTextNode(f[0]))
                    a=links.appendChild(doc.createElement("a"))
                    lid=dict(path=f[1],filename=os.path.basename(f[1]),oldversion="",newversion=entry[IMPORTEDTAG])
                    a.setAttribute("href",(VIEWLINKFORMAT % lid).replace(" ","%20"))
                    a.appendChild(doc.createTextNode(dropModules(f[1])))
                    links.appendChild(doc.createElement("br"))
                    links.appendChild(doc.createTextNode("\n"))

        # Special content (user defined string?)
        elif section.__class__=="".__class__:
            if not section.strip().startswith("<"):
                # doesn't start with a tag, we append as text
                node.appendChild(doc.createTextNode(section))
            else:
                # Have to parse it so we can append it
                # If we just appended it as text, all the good stuff would be escaped away
                subdoc=xml.dom.minidom.parseString(section)
                # NOTE(review): removing nodes from childNodes while iterating
                # over it may skip alternate children for multi-node markup --
                # confirm against expected PERMALINK_ORDER content.
                for n in subdoc.childNodes:
                    node.appendChild(subdoc.removeChild(n))
                subdoc.unlink()

        #put a return in the source after each section
        node.appendChild(doc.createTextNode("\n"))

    # credit footer, outside the entry container
    gen=rootdiv.parentNode.appendChild(doc.createElement("div"))
    gen.setAttribute("class","cvslog2web_credit")
    gen.appendChild(doc.createTextNode("Generated by "))
    a=appendTextualTag(gen,"a","cvslog2web")
    a.setAttribute("target","_top")
    a.setAttribute("href",SCRIPT_URL)
    gen.appendChild(doc.createTextNode(SCRIPT_VERSION))

    f=open(permafile,"wb")
    doc.writexml(f)
    f.close()
    doc.unlink()
1060 | |
def genPermalinkEntry(i):
    """Return the permalink URL for history entry i, or None when i is past the end."""
    if i>=len(history):
        return None
    return genPermalink(history[i][DATETIME])
1064 | |
# ---- Permalink page generation driver ----
# history[0] is the newest entry; each page's prev/next links chain through
# the adjacent history indices.
if PERMALINKDIR:
    if REBUILDPERMALINKS:
        # Using this section will rewrite all the permalinks in the history (handy for development...)
        writePermalink(history[0],histFiles[0],histLinks[0],genPermalinkEntry(1))
        for i in range(1,len(history)-1):
            # interior entries link both ways: prev=i+1 (older), next=i-1 (newer)
            writePermalink(history[i],histFiles[i],histLinks[i],genPermalinkEntry(i+1),genPermalinkEntry(i-1))
        if len(sys.argv)<3 and len(history)>=MAXHISTORY:
            pass #don't do last permalink if it's a rebuilt -- would break link chain because we don't have previous
        elif len(droppedHistory)==0:
            # oldest entry, nothing dropped: no previous page exists
            i=len(history)-1
            writePermalink(history[i],histFiles[i],histLinks[i],None,genPermalinkEntry(i-1))
        else:
            # oldest retained entry still links back to the newest dropped one
            i=len(history)-1
            writePermalink(history[i],histFiles[i],histLinks[i],genPermalink(droppedHistory[-1][DATETIME]),genPermalinkEntry(i-1))
        if VERBOSE:
            print "Permalink generated:", genPermalink(history[0][DATETIME])
    else:
        # Normal run: write the new entry's page, then refresh the previously
        # newest entry so its 'next' link points at the new page.
        writePermalink(history[0],histFiles[0],histLinks[0],genPermalinkEntry(1))
        curpl=genPermalinkEntry(0)
        if len(history)>1:
            writePermalink(history[1],histFiles[1],histLinks[1],genPermalinkEntry(2),curpl)
        if VERBOSE:
            print "Permalink generated:", curpl
1088 |