Changesets can be listed by changeset number.
The Git repository is here.
- Revision:
- 145
- Log:
Updated to place public components in 'public' folder. The script
has been extended with various new configuration options and a new
configuration file added. Designed to be integrated with RCVSweb
from Changeset #144.
- Author:
- adh
- Date:
- Tue Nov 14 22:39:56 +0000 2006
- Size:
- 49917 Bytes
- Properties:
- Property svn:executable is set
1 | #!/bin/env python |
2 | |
3 | # cvslog2web by Ethan Tira-Thompson |
4 | # Released under the GPL (http://www.gnu.org/copyleft/gpl.html) |
5 | # $Date: 2006/08/22 04:34:23 $ |
6 | # Provides syndicated (Atom) and HTML output from CVS commit logs |
7 | SCRIPT_REVISION="$Revision: 1.11 $" |
8 | SCRIPT_URL="http://ethan.tira-thompson.com/cvslog2web" |
9 | |
10 | ################################################################################ |
11 | ################ INSTALLATION ################ |
12 | ################################################################################ |
13 | |
14 | # To install this script, copy it into the CVSROOT directory of your |
15 | # repository, and then add the following line to CVSROOT/loginfo: |
16 | # ALL python $CVSROOT/CVSROOT/cvslog2web.py $CVSROOT %{sVv} [config-file] |
17 | |
18 | # Don't forget you can replace 'ALL' with a filter to only apply |
19 | # cvslog2web to certain modules, or apply different copies of cvslog2web |
20 | # (presumably with different configuration settings) to different modules |
21 | |
22 | # The optional config file can hold the configuration parameters shown below |
23 | # This is convenient to use the same script with different settings for |
24 | # different modules (as opposed to copying the script itself) |
25 | |
26 | # You probably also received some stylesheet and images. The default |
27 | # placement for these files depends on the directory layout of your website: |
28 | # Atom.css -- same directory as the feed output (FEEDOUT) |
29 | # cvslog2web.css -- HTMLOUT_CSS and PERMALINK_CSS, default to same directory as |
30 | # the html page (HTMLOUT), and root of the permalink structure respectively |
31 | # nav_*.gif -- same directory as PERMALINK_CSS |
32 | # *.png -- Root of permalink structure (PERMALINK_URL_PREFIX) |
33 | |
34 | ################################################################################ |
35 | ################ CONFIGURATION ################ |
36 | ################################################################################ |
37 | |
import os,sys

# Given the command line suggested above:
#  arg 0 will be the script path (unused)
if len(sys.argv)>2:
    root=sys.argv[1] # arg 1 will be the repository root (CVSROOT)
    args=sys.argv[2].split(" ") # arg 2 is the directory of the commit followed by a list of files (space delimited)
# arg 3 is an optional configuration file parameter, which will override the defaults shown below
# arg 3 is handled at the end of the configuration section
# NOTE: with fewer than 3 arguments, 'root' and 'args' are only defined later
# (in the LOAD EXTERNAL section) when running in regenerate mode.
48 | #### These first three settings control the destination of the output #### |
49 | #### Set to None or empty string ("") to disable that format's generation #### |
50 | |
51 | # This is the directory to hold entries as individual html files |
52 | # If PERMALINKDIR is disabled, cannot generate links from the feed and HTML |
53 | # to permanent log entries (but that's perfectly legal to do) |
54 | # PERMALINKDIR should essentially be the root of your webserver, can use |
55 | # PERMALINK_STRUCTURE setting (below) to subdivide files into subdirectories |
56 | PERMALINKDIR="/Users/ejt/Sites/" |
57 | |
58 | # where to direct the Atom feed output, relative paths are interpreted from PERMALINKDIR |
59 | FEEDOUT="cvs.xml" |
60 | |
61 | # will hold the most recent MAXHISTORY entries, relative paths are interpreted from PERMALINKDIR |
62 | # This is intended as a quick list of recent entries, suitable for including into a larger page via SSI or frames |
63 | HTMLOUT="recent.html" |
64 | |
65 | |
66 | # Defaults sets the tmp directory to be created in the same location as the script |
67 | # This directory will hold status files between commits |
68 | # Be sure to think through moving this outside CVSROOT (all committers need access to the same files) |
69 | TMPDIR=os.path.join(os.path.dirname(sys.argv[0]),"cvslog2web-tmp") |
70 | |
71 | # maximum amount of time (seconds) in which checkins with the same message will be grouped together |
72 | TIMEOUT=15*60 |
73 | |
74 | # number of checkins to retain |
75 | # this will be reset to max(MAXHISTORY,FEED_MAXHISTORY,HTMLOUT_MAXHISTORY) |
76 | MAXHISTORY=0 |
77 | |
78 | # domain to use for entry id tags and default email addresses |
79 | DOMAIN="example.org" |
80 | |
81 | # viewcvs integration -- links from output to diffs |
82 | # format has 4 fields available: filename, path, oldversion, newversion |
83 | DIFFLINKFORMAT="http://www.example.org/cgi/viewcvs.cgi/%(path)s.diff?r1=%(oldversion)s&r2=%(newversion)s" |
84 | |
85 | # For adds and removes, no diff is available, one of oldversion or newversion |
86 | # will be set (depending on add or remove), and the other will be an empty string |
87 | # Note how this default handles this by running them both together so it doesn't matter which is set |
88 | VIEWLINKFORMAT="http://www.example.org/cgi/viewcvs.cgi/%(path)s?rev=%(oldversion)s%(newversion)s&content-type=text/vnd.viewcvs-markup" |
89 | |
90 | # entry titles can be a file list or the first line of the commit message |
91 | TITLE_FILE_LIST, TITLE_MESSAGE_FIRST_LINE=range(2) |
92 | ENTRYTITLE=TITLE_MESSAGE_FIRST_LINE |
93 | |
94 | # if using message as title (TITLE_MESSAGE_FIRST_LINE), can either |
95 | # repeat that line again in the content, or just skip it |
96 | REPEAT_ALWAYS, REPEAT_WHEN_MULTIPLE, REPEAT_NEVER=range(3) |
97 | REPEAT_FIRST_LINE=REPEAT_WHEN_MULTIPLE |
98 | |
99 | # contact list, user names are looked up here, if not found, will fall |
100 | # back to information obtained from the system (password database and DOMAIN) |
101 | CONTACT={ |
102 | "ejt":("Ethan Tira-Thompson","ejt@cs.cmu.edu"), |
103 | "guest":("Guest",None) #use None or empty string ('') to hide email |
104 | } |
105 | |
106 | # Can drop the module name from paths in the file list |
107 | # Useful when you only have one module in the repository, or are filtering |
108 | # the input being passed to cvslog2web from the loginfo file |
109 | # Value is the depth to drop, so 0 drops nothing, 1 drops the first directory, etc. |
110 | DROP_MODULE=0 |
111 | |
112 | # controls whether feedback is given as each type of output is produced |
113 | VERBOSE=True |
114 | |
115 | |
116 | ################ ATOM feed customization ################ |
117 | FEEDTITLE="cvslog2web CVS" # title for the feed |
118 | FEEDDESCRIPTION="" # a short description of the feed |
119 | FEEDHOMELINK="http://www.example.org/" # a link to the "main" page |
120 | FEEDLOGO="" # spec says this image should be twice as wide as it is tall |
121 | FEEDICON="" # spec says this should be squared (as wide as tall) |
122 | FEEDENTRYPREFIX="Commit: " # prefix for entry titles in the feed output |
123 | # Controls generation of a list of <link rel="related"> tags for each file |
124 | # (not shown by many readers, and generally duplicates the file list in those that do.) |
125 | FEEDRELATED=False |
126 | FEED_MAXHISTORY=15 |
127 | |
128 | # FEEDSELFLINK *should* be set, but FEEDID *must* be set... if you don't |
129 | # provide FEEDSELFLINK, you will instead need to fill in FEEDID |
130 | # Further, the HTML index and permalink pages require the self link if you want them to contain links to the feed |
131 | # (why two separate settings? If you move the link, keep the link as the ID to maintain identity) |
132 | # These must be full, absolute URLs, not a relative path! |
133 | FEEDSELFLINK="http://www.example.org/cvs.xml" #self-link for the feed |
134 | FEEDID="" # a globally unique ID for the feed -- if empty, will be set to FEEDSELFLINK |
135 | |
136 | |
137 | ################ HTML output customization ################ |
138 | HTMLOUT_STANDALONE=True # whether to make the root element <html> to stand alone (vs. included via server-side include) |
139 | HTMLOUT_CSS="cvslog2web.css" # style sheet to include (only applies if HTML_STANDALONE is True) |
140 | HTMLTITLE="" # title for the html output, will fall back to FEEDTITLE if blank, use None to disable |
141 | HTMLOUT_ENTRYPREFIX="" # prefix for title lines; does *not* fall back on FEEDENTRYPREFIX |
142 | HTMLOUT_MESSAGE=True # set to False to skip the log body |
143 | HTMLOUT_FILELIST=True # set to False to skip the file list (also skipped if ENTRYTITLE==TITLE_FILE_LIST ) |
144 | HTMLOUT_MAXHISTORY=10 |
145 | # Prefix author list with the given string in a nested SPAN of class |
146 | # 'cvslog2web_authorsprefix' and don't use brackets around names? |
147 | # Works best if CSS drops Authors span onto separate line, e.g. |
148 | # by setting 'display: block'. If the string is empty, uses brackets |
149 | # around the comma-separated names with no prefix span. |
150 | HTMLOUT_AUTHORSPREFIX="" |
151 | # HTMLOUT_ORDER allows you to define the order of items in the summary |
152 | HO_TITLE,HO_AUTHORS,HO_TIMESTAMP,HO_MESSAGE,HO_FILELIST=range(5) |
153 | HTMLOUT_ORDER=[HO_TITLE,HO_AUTHORS,HO_MESSAGE,HO_FILELIST] |
154 | # Timestamp format if the HL_TIMESTAMP section is included (see above) |
155 | # according to 'strftime'. A space and a timezone indication is always |
156 | # appended to the string regardless of format; avoid '%Z' in the string. |
157 | HTMLOUT_TIMESTAMPFORMAT="%a. %B %d, %Y at %I:%M:%S %p" |
# Optional prefix string, under class 'cvslog2web_timestampprefix' within
# the timestamp DIV.
HTMLOUT_TIMESTAMPPREFIX="Committed " # fixed spelling of user-visible default (was "Commited ")
161 | # Outer encapsulating DIV class for each entry in the list |
162 | HTMLOUT_OUTERCLASS="cvslog2web_entry" |
163 | |
164 | # This will be used as the 'target' attribute for all links on the page. |
165 | # (Handy if using the HTML output within a frame on your site, and you want the |
166 | # diffs to load in another frame.) Ignored if empty, otherwise specify a frame |
167 | # name from your site or one of the keywords _self, _parent, _top, or _blank. |
168 | HTMLOUT_TARGET="" |
169 | |
170 | ################ permanent link pages customization ################ |
171 | # Prefix for permalink URLs (structure/filename will be appended) |
172 | # Can be blank to use relative URLs from the feed/HTML pages |
173 | # Put any directory structure in PERMALINK_STRUCTURE, not here |
174 | # Only fill this in if you are specifying an absolute URL (i.e. starts with http://) |
175 | PERMALINK_URL_PREFIX="" |
176 | # Prefix for images; normally they reside in the same place as the permalinks |
177 | PERMALINK_IMG_PREFIX=PERMALINK_URL_PREFIX |
178 | # strftime format string for permalink files -- can spread among subdirs with '/' |
179 | # In a post-processing stage, microseconds are available via '%%(us)d' (with normal printf-style formatting, e.g. %%(us)06d) |
180 | PERMALINK_STRUCTURE="commits/%Y/%m/commit-%d-%H-%M-%S-%%(us)06d.html" |
181 | # style sheet, will fall back to PERMALINK_URL_PREFIX+HTMLOUT_CSS if blank, use None to disable |
182 | PERMALINK_CSS="" |
183 | # text to use in the prev/next buttons |
184 | PL_PREVTEXT="PREV" |
185 | PL_NEXTTEXT="NEXT" |
186 | # Mostly for debugging, causes all permalink pages in history to be regenerated |
187 | # Default causes full rebuild if the script is run directly (without cvs-provided arguments) |
188 | REBUILDPERMALINKS=len(sys.argv)<3 |
189 | |
190 | # PERMALINK_ORDER allows you to define the order of items on permalink pages |
191 | PL_AUTHORS,PL_TIMESTAMP,PL_MESSAGE,PL_FILELIST,PL_PREVLINK,PL_NEXTLINK,PL_FEEDLINK=range(7) |
192 | PERMALINK_ORDER=[PL_TIMESTAMP,PL_PREVLINK,PL_NEXTLINK,PL_AUTHORS,PL_MESSAGE,PL_FILELIST,PL_FEEDLINK] |
193 | # But check this out: you can also include an xhtml string to be written at |
194 | # any point in the page! If the string begins with '<' it is parsed as xml. |
195 | # If it starts with any other character it is escaped as plain text. It is |
196 | # not possible to have incomplete tags which span built-in elements. |
197 | PERMALINK_ORDER.insert(0,"<h1><a href=\""+FEEDHOMELINK+"\">Visit Project Homepage</a></h1>") |
198 | # As with HTMLOUT_STANDALONE - should permalink pages be full HTML documents |
199 | # or just fragments for inclusion in a wider page template? |
200 | PERMALINK_STANDALONE=True |
201 | # No JavaScript in Permalink pages - standard HTML links cover most cases but |
202 | # an onClick attribute directing the window location to the same URL is added |
203 | # for "belt and braces", unless overridden. |
204 | PERMALINK_ADDJS=True |
205 | # Outer encapsulating class for each permalink file's main content |
206 | PERMALINK_OUTERCLASS="cvslog2web_entry" |
207 | # If defined as an array of three strings, each string is taken to be the URL |
208 | # of an image to use in place of the "A" (first array entry), "M" (second array |
209 | # entry) and "R" (third and last array entry) letters, which indicate where files |
210 | # are added, modified or removed respectively in permalink pages. If an empty |
211 | # array, the letters are used. |
212 | PERMALINK_STATUSICONS=[] |
213 | |
################ LOAD EXTERNAL ################
# load overrides from optional external configuration file (if specified)
# The file is executed in the current scope, so it may overwrite any of the
# settings above.  NOTE(review): execfile runs arbitrary code -- the config
# file must be as trusted as this script itself.
try:
    config_file=None
    if len(sys.argv)>3:
        config_file=os.path.join(os.path.dirname(sys.argv[0]),sys.argv[3])
    if len(sys.argv)==2: # running outside loginfo, just regenerate
        config_file=os.path.join(os.path.dirname(sys.argv[0]),sys.argv[1])
        root=""
        args=[]
    if config_file:
        if os.path.isfile(config_file):
            execfile(config_file)
        else:
            sys.exit("cvslog2web could not find configuration file "+config_file)
except:
    # Deliberately broad: a broken config file should not kill the commit hook.
    # NOTE(review): this bare except also traps the sys.exit above (SystemExit),
    # so a missing config file only warns instead of exiting -- confirm intended.
    import traceback
    print "The following exception occurred while processing external configuration file:"
    traceback.print_exc()
    print "Execution will continue with default settings"

# Fill in settings whose defaults are derived from other settings
if not FEEDID:
    FEEDID=FEEDSELFLINK
if HTMLTITLE=="":
    HTMLTITLE=FEEDTITLE
if PERMALINK_CSS=="":
    if PERMALINK_URL_PREFIX:
        PERMALINK_CSS=PERMALINK_URL_PREFIX+HTMLOUT_CSS
    else:
        # relative path: climb back out of the permalink subdirectory structure
        PERMALINK_CSS="../"*PERMALINK_STRUCTURE.count("/")+HTMLOUT_CSS
# if these are already set to absolute paths, then these are no-ops
HTMLOUT=os.path.join(PERMALINKDIR,HTMLOUT)
FEEDOUT=os.path.join(PERMALINKDIR,FEEDOUT)
247 | |
248 | |
249 | ################################################################################ |
250 | ################ INPUT PARSING ################ |
251 | ################################################################################ |
252 | # You don't want to change much below here... |
253 | # It's icky code from here on out. |
254 | |
import re, pickle, time, datetime, pwd
import xml.dom, xml.dom.minidom

# retain enough history to satisfy the longest of the output formats
MAXHISTORY=max(MAXHISTORY,FEED_MAXHISTORY,HTMLOUT_MAXHISTORY)

# single timestamp for this run; the string form is RFC 3339 (UTC) as Atom requires,
# with microseconds spliced in since strftime has no sub-second field
curtime=datetime.datetime.utcnow()
curtime_str=curtime.strftime("%Y-%m-%dT%H:%M:%S.%%06d+00:00") % curtime.microsecond

# ensure the scratch directory holding inter-commit state exists
if os.path.exists(TMPDIR):
    if not os.path.isdir(TMPDIR): sys.exit("cvslog2web: file blocking TMPDIR "+TMPDIR)
else:
    os.makedirs(TMPDIR)

# verify CVSROOT
root=root.rstrip(os.sep) #strip any extra "/" at the end
if root and not os.path.isdir(root): sys.exit("cvslog2web: bad CVSROOT: "+root)

# Pull the cvslog2web script's version number out of the CVS keyword replacement
SCRIPT_VERSION=re.findall("[0-9.]+",SCRIPT_REVISION)
if len(SCRIPT_VERSION)==0:
    sys.exit("cvslog2web: invalid SCRIPT_REVISION setting (no version number) "+SCRIPT_REVISION)
elif len(SCRIPT_VERSION)>1:
    print "WARNING cvslog2web SCRIPT_REVISION contains multiple version strings?", SCRIPT_REVISION
SCRIPT_VERSION=SCRIPT_VERSION[0]
279 | |
# First stdin line names the directory being updated; in rebuild mode
# (no cvs-provided arguments) use os.sep as a placeholder so the
# startswith test below still passes.
if len(sys.argv)<3:
    status=os.sep
else:
    status=sys.stdin.readline()[:-1] # strip trailing newline
    if not status.startswith("Update of "): sys.exit("cvslog2web: unrecognized cvs output")
    status=status[len("Update of "):]

if not status.startswith(root+os.sep): sys.exit("cvslog2web: commit outside repository?")
cidir=status[len(root)+1:] #don't leave intro '/' on cidir

#first word is the directory, pop it off
# (the directory name may itself contain spaces, so keep joining tokens until they match)
cipop=""
while cipop!=cidir:
    if len(args)==0: sys.exit("cvslog2web: Unable to parse cvs output")
    cipop=os.path.join(cipop,args.pop(0))
del cipop

# test to see if this is the result of an import vs. regular commit
if len(sys.argv)>=3 and sys.argv[2].endswith(" - Imported sources"):
    # NOTE(review): this slices the args *list* by the character length of the
    # suffix string (19 elements), not by its token count -- verify this is
    # really the intended amount to strip.
    files=args[:-len(" - Imported sources")]
    imported=True
else:
    # args (set from command line) is a string with a series of filename,oldvers,newvers values
    # this regular expression parses the string into a list of tuples
    # This RE is smart enough to handle filenames with spaces or commas in them!
    files=re.findall("(.*?),([0-9.]+|NONE),([0-9.]+|NONE) "," ".join(args)+" ")
    imported=False
307 | |
# Turn a dotted decimal revision string into a list of ints: "1.2.3" -> [1,2,3]
# Lists compare numerically field by field, which is what revision comparison
# needs -- lexicographic comparison of the raw strings gets "1.10" vs "1.9" wrong.
def version2list(s):
    """Parse a CVS revision string; the sentinel "NONE" maps to an empty list."""
    if s=="NONE":
        return []
    return [int(component) for component in s.split(".")]
# Now this next bit converts files into a dictionary mapping of
# names to [oldversion,newversion] lists, using the list form of version numbers
# (we're going to add on to the list of values for each file)
# keys: filename relative to cidir; values: [oldversion_list, newversion_list]
files=dict([(x[0], map(version2list,x[1:])) for x in files])
317 | |
# Enough of the command line arguments, now parse the stdin message
# We still need to get the status flag (add/modify/remove) for each file
# First we need to get the list of file names in each section
# Each of these sections at this point is just a space-delimited list of file names
added=[]; modified=[]; removed=[]; message=[]; importedFiles=[]; importedTag=""
if len(sys.argv)>=3:
    line=sys.stdin.readline()
    section=[] #empty initial section; lines before any recognized header are discarded
    while line:
        if section is message:
            # inside the log message body; for imports a "Status:" line ends it
            if imported and line=="Status:\n":
                message=["".join(message).strip()+"\n"]
                section=[]
            else:
                section.append(line)
        elif line=="Log Message:\n":
            section=message
        elif line=="Added Files:\n":
            section=added
        elif line=="Modified Files:\n":
            section=modified
        elif line=="Removed Files:\n":
            section=removed
        elif imported and (line.startswith("Vendor Tag:") or line.startswith("Release Tags:")):
            # keep the tag lines as part of the message, then expect the import file list
            message.append(line)
            section=importedFiles
            if line.startswith("Release Tags:"):
                importedTag=line[len("Release Tags:"):].strip()
        elif section is importedFiles:
            # import file lines look like "X path" where X is a status letter
            m=re.findall("([A-Z]) (.*)",line[:-1])
            if len(m)==1:
                importedFiles.append(m[0])
        else:
            section.append(line[1:-1]) #strip initial tab and trailing newline/linefeed
        line=sys.stdin.readline()
    added="".join(added)
    modified="".join(modified)
    removed="".join(removed)
    message="".join(message).strip()
    importedFiles.sort(lambda x,y: cmp(x[1],y[1])) # (letter,path) tuples, sorted by path
358 | |
# Don't do anything with new directories (everyone runs with 'update -dP' anyway right?)
# Directories don't matter until there's something in them
if len(sys.argv)>=3 and sys.argv[2].endswith(" - New directory"):
    # waited this long because CVS throws a hissy fit ("broken pipe...")
    # if you don't read the log message before quitting
    sys.exit()

# Constants for symbolic reference to a file's status within a commit
ADDED="add"
MODIFIED="mod"
REMOVED="del"
GHOST="ghost" # this comes up later... we won't get this directly in the input

# We have the file names from the command line in 'files' (parsed above).
# Now we need to see which files are in which status section.
# A bit tricky because each space could be separating files
# or could be part of a filename itself.

# Spaces are such a pain, especially when they are being used
# as the delimiter, and your input doesn't escape the "real" spaces! Grrr.
# *** Still not perfect... if "foo" and "foo bar" are both
# involved, this could confuse it. ***
381 | def processFiles(l,tag,out): |
382 | partial="" |
383 | for f in l.split(" "): |
384 | f=partial+f |
385 | if f in files: |
386 | files[f].append(tag) |
387 | out[os.path.join(cidir,f)]=tuple(files[f]) |
388 | partial="" |
389 | else: |
390 | partial=f+" " |
391 | if partial.strip(): |
392 | print "WARNING: partial filename in", tag, "section: '"+partial.strip()+"'" |
393 | paths={} |
394 | processFiles(added,ADDED,paths) |
395 | processFiles(modified,MODIFIED,paths) |
396 | processFiles(removed,REMOVED,paths) |
397 | |
398 | # now paths is a dictionary mapping full path to (oldv,newv,status) tuples |
399 | |
400 | # We've got our input regarding the current log entry, |
401 | # need to compare that against previous entry and see if it's |
402 | # all part of the same commit (since this script will be called |
403 | # separately for each directory involved in the commit, but |
404 | # we want them all to be associated in the same log entry) |
405 | |
# load the last checkin's message and time, with a default value if file not found
def readfile(name,default):
    """Unpickle and return the contents of TMPDIR/name.

    Returns `default` when the file cannot be opened (typically it does not
    exist yet on the first run).  Unpickling errors still propagate, as in
    the original behavior."""
    try:
        f=open(os.path.join(TMPDIR,name),"rb")
    except (IOError,OSError): # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
        return default #doesn't exist, that's ok
    try:
        return pickle.load(f)
    finally:
        f.close() # close even if pickle.load raises (previously leaked the handle)
414 | |
# lasttime is the datetime object from the last call to the script
lasttime=readfile("lasttime",curtime-datetime.timedelta(0,TIMEOUT))

# history is everything we know about each entry, up to MAXHISTORY long
# The format is described in the next several lines
history=readfile("history",[])

# These enumerations define the basic fields in each entry in history
PATH,DATETIME,TIMESTAMP,AUTHORS,MESSAGE,IMPORTEDFILES,IMPORTEDTAG=range(7)

### History format ###
# History is a pretty major, and somewhat complex structure.
# It's basically just a list of entries, where each entry is a list indexed
# by the enumerations listed above, defining basic format the log entries
# PATH element holds the 'paths' variable (dictionary mapping paths to version numbers and status flag)
# DATETIME is the datetime object of the entry's creation (see insert below)
# TIMESTAMP is the last-updated timestamp as an RFC 3339 string (see curtime_str)
# AUTHORS is a dictionary mapping username to (name,email) tuples (both strings)
# MESSAGE is a string holding the log message read from stdin
# IMPORTEDFILES / IMPORTEDTAG hold the extra data parsed for 'cvs import' commits
######################

# get the user's name and username
# This might be a unix-only feature, not the end of the world if you
# have to rely on the contact list in the configuration section, or
# substitute another method
pw_db=pwd.getpwuid(os.getuid())
user=pw_db[0] # the "short" name, e.g. 'ejt'
user_name=pw_db[4] # the "full" name (GECOS field), e.g. 'Ethan Tira-Thompson'

# If within the TIMEOUT and have the same message, merge
# the current file list with the first entry of the history
# Otherwise, add a new entry (popping old entries if len(history)>MAXHISTORY...)
droppedHistory=[] # stores popped entries, reused when rebuilding permalinks
if len(sys.argv)<3:
    pass # rebuild shouldn't change any history entries
elif curtime-lasttime<datetime.timedelta(0,TIMEOUT) and len(history)>0 and history[0][MESSAGE]==message and not imported:
    # merge is a little interesting:
    # If the file is added and then modified with the same log message,
    # merge as still added, just with the later revision number
    # Removed and re-added is modified
    # Similarly, modified then removed is just removed
    # However, added then removed is as if never existed, but still need
    # to store file info, (in case of re-add) hence the "ghost" status
    def merge(v1,v2):
        # v1/v2 are (oldversion,newversion,status) for the same path;
        # keep the widest version span and combine the status flags per above
        ov=min(v1[0],v2[0])
        nv=max(v1[1],v2[1])
        if v1[2]==v2[2]:
            t=v1[2]
        elif v1[2]==ADDED and v2[2]==REMOVED:
            t=GHOST
        elif v1[2]==REMOVED and v2[2]==ADDED:
            t=MODIFIED
        elif v1[2]==ADDED or v2[2]==ADDED:
            t=ADDED
        elif v1[2]==REMOVED or v2[2]==REMOVED:
            t=REMOVED
        else:
            t=GHOST
        return (ov,nv,t)

    for k,v in paths.iteritems():
        # setdefault inserts brand-new paths directly; already-known ones get merged
        if history[0][PATH].setdefault(k,v)!=v:
            history[0][PATH][k]=merge(history[0][PATH][k],v)
    history[0][TIMESTAMP]=curtime_str
    history[0][AUTHORS][user]=CONTACT.get(user,(user_name,user+"@"+DOMAIN))

else: # push paths as a new entry on its own
    # NOTE(review): the fallback tuple uses 'user' as the display name here but
    # 'user_name' in the merge branch above -- probably should match; verify.
    authors={user:CONTACT.get(user,(user,user+"@"+DOMAIN))}
    history.insert(0,[paths,curtime,curtime_str,authors,message,importedFiles,importedTag])
    while len(history)>MAXHISTORY: droppedHistory.append(history.pop())
485 | |
486 | |
################################################################################
################                STATUS STORAGE                  ################
################################################################################
# We want to write out the history as soon as possible to reduce the risk
# of a processing error causing us to drop or corrupt an entry. At least
# once it's stored in the file, if there's an error I can debug it and then
# we can regenerate the output.
if len(sys.argv)>=3: # don't touch files if it's a rebuild
    f=open(os.path.join(TMPDIR,"lasttime"),"wb")
    pickle.dump(curtime,f)
    f.close()
    f=open(os.path.join(TMPDIR,"history"),"wb")
    pickle.dump(history,f)
    f.close()
501 | |
502 | |
503 | ################################################################################ |
504 | ################ FUNCTION DECLARATIONS ################ |
505 | ################################################################################ |
506 | |
# shorthand for adding a subnode with a particular name and textual content
def appendTextualTag(node,name,text):
    """Append <name>text</name> to `node` (plus a newline text node for
    readable serialized output) and return the new element.

    Uses the module-global `doc` as the owning document.  The third
    parameter was renamed from 'str' (it shadowed the builtin); all
    callers pass it positionally, so this is backward compatible."""
    n=node.appendChild(doc.createElement(name))
    n.appendChild(doc.createTextNode(text))
    node.appendChild(doc.createTextNode("\n"))
    return n
513 | |
# quick-n-dirty version of set(), when used with reduce():
# appends y onto x only if it isn't already there, then returns x
def collect(x,y):
    if y in x:
        return x
    x.append(y)
    return x
518 | |
# removes the first DROP_MODULE directory names from the front of path
def dropModules(path):
    remaining=DROP_MODULE
    while remaining>0:
        cut=path.find(os.sep)
        if cut==-1:
            return "" # fewer components than DROP_MODULE: nothing left
        path=path[cut+1:]
        remaining-=1
    return path
527 | |
# Converts a list of file names to a more compact form, grouping
# those in directories together:
#   /foo/bar and /foo/baz become /foo/{bar,baz}
def makeStr(names):
    # unique directory names, deepest first, so each file attaches to the most
    # specific directory that holds more than one of the remaining files
    dirs=reduce(collect,[os.path.dirname(x) for x in names],[])
    dirs.sort(lambda x,y:-cmp(len(x),len(y))) #go from longest (deepest) to shortest (shallowest)
    common={} # directory (module-stripped) -> list of member names within it
    remain=names[:] # copy: entries are removed as they are grouped
    for d in dirs:
        paths=filter(lambda x: x.startswith(d),remain)
        paths.sort()
        if len(paths)>1:
            for p in paths:
                common.setdefault(dropModules(d),[]).append(p[len(d)+1:])
                remain.remove(p)
    # leftovers are sole members of their directory; keep their full (stripped) path
    for p in remain:
        common.setdefault(dropModules(os.path.dirname(p)),[]).append(dropModules(p))
    ks=common.keys()
    ks.sort()
    strs=[]
    for k in ks:
        v=common[k]
        if len(v)>1:
            if k:
                strs.append(os.path.join(k,"{"+(",".join(v))+"}"))
            else:
                strs.append(", ".join(v)) # no common directory: plain comma list
        else:
            strs.append(v[0])
    return ", ".join(strs)
558 | |
559 | # This older version is a bit more simplistic, but doesn't |
560 | # handle singleton paths as nicely |
561 | #def makeStr(names): |
562 | # dirs=reduce(collect,[os.path.dirname(x) for x in names],[]) |
563 | # strs=[] |
564 | # for d in dirs: |
565 | # files=map(os.path.basename,filter(lambda x: x.startswith(d),names)) |
566 | # if len(files)==1: |
567 | # strs.append(os.path.join(d,files)) |
568 | # else: |
569 | # strs.append(os.path.join(d,"{"+",".join(files)+"}")) |
570 | # return " ".join(strs) |
571 | |
# convert version list to string representation
# [major, minor, patch, ...] -> "major.minor.patch..."
def vers2str(v):
    return ".".join([str(component) for component in v])
576 | |
577 | |
# this part computes titles and links for each entry in the history
histFiles=[] # per-entry "Added ...; Modified ...; Removed ..." summary strings
for entry in history:
    # generate title string: one compacted file list per status flag
    addstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==ADDED])
    modstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==MODIFIED])
    remstr=makeStr([k for k,v in entry[PATH].iteritems() if v[2]==REMOVED])
    files=[]
    if len(addstr)>0:
        files.append("Added "+addstr)
    if len(modstr)>0:
        files.append("Modified "+modstr)
    if len(remstr)>0:
        files.append("Removed "+remstr)
    files="; ".join(files)
    histFiles.append(files)
594 | |
# Links are a bit tricky; this will generate a dictionary of
# "common paths", where each key is the path, value is a list of
# files (may include singleton paths) within that path.
# Each file is a dict with the fields consumed by DIFFLINKFORMAT /
# VIEWLINKFORMAT: path, filename, oldversion, newversion.
# NOTE: reads the loop variable 'entry' as a global (set by the loops below).
def makeLinks(paths):
    dirs=reduce(collect,[os.path.dirname(x) for x in paths],[])
    dirs.sort(lambda x,y:-cmp(len(x),len(y))) #go from longest (deepest) to shortest (shallowest)
    links={}
    # NOTE(review): unlike makeStr, this aliases (and mutates) the caller's
    # list rather than copying it -- callers here pass fresh lists, but verify.
    remain=paths
    for d in dirs:
        paths=filter(lambda x: x.startswith(d),remain)
        if len(paths)>1:
            paths.sort()
            for p in paths:
                info=entry[PATH][p]
                links.setdefault(dropModules(d),[]).append(dict(path=p,filename=p[len(d)+1:],oldversion=vers2str(info[0]),newversion=vers2str(info[1])))
                remain.remove(p)
    for p in remain:
        info=entry[PATH][p]
        links.setdefault(dropModules(os.path.dirname(p)),[]).append(dict(path=p,filename=dropModules(p),oldversion=vers2str(info[0]),newversion=vers2str(info[1])))
    return links
615 | |
# histLinks parallels history: one dict per entry mapping a section label
# ("Added: ", "Modified: ", "Removed: " or "Diff: ") to a makeLinks() result
histLinks=[]
if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE:
    for entry in history:
        addlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==ADDED])
        modlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==MODIFIED])
        remlinks=makeLinks([k for k,v in entry[PATH].iteritems() if v[2]==REMOVED])
        links={}
        if len(addlinks)>0:
            links["Added: "]=addlinks
        if len(modlinks)>0:
            links["Modified: "]=modlinks
        if len(remlinks)>0:
            links["Removed: "]=remlinks
        histLinks.append(links)
else:
    # the title already lists the files, so just provide one combined diff list
    for entry in history:
        histLinks.append({"Diff: ": makeLinks(entry[PATH].keys())})
633 | |
def genPermalink(t):
	# URL of the permalink page for the commit made at datetime t: expand the
	# structure template via strftime, then fill in the microsecond ("us")
	# field with a second %-substitution pass.
	return PERMALINK_URL_PREFIX+(t.strftime(PERMALINK_STRUCTURE)%{"us":t.microsecond})
637 | |
def genPermafile(t):
	# Filesystem path of the permalink page for the commit made at datetime t,
	# translating the URL-style '/' separators to the local OS separator.
	reldir=t.strftime(PERMALINK_STRUCTURE)%{"us":t.microsecond}
	return os.path.join(PERMALINKDIR,reldir.replace("/",os.sep))
641 | |
642 | |
643 | ################################################################################ |
644 | ################ RSS (ATOM) OUTPUT ################ |
645 | ################################################################################ |
646 | |
if FEEDOUT:
	# Generate the Atom 1.0 feed: a header block describing the feed itself,
	# then one <entry> element per commit in the saved history.
	dom=xml.dom.getDOMImplementation()
	doc=dom.createDocument("http://www.w3.org/2005/Atom","feed",None)
	feed=doc.documentElement
	# the stylesheet processing instruction must precede the root element
	doc.insertBefore(doc.createProcessingInstruction("xml-stylesheet",'href="Atom.css" type="text/css"'),feed)
	feed.setAttribute("xmlns","http://www.w3.org/2005/Atom")
	feed.appendChild(doc.createTextNode("\n"))

	# feed header tags
	appendTextualTag(feed,"id",FEEDID)
	appendTextualTag(feed,"title",FEEDTITLE)
	if FEEDHOMELINK:
		# rel="alternate": the human-readable page this feed corresponds to
		linknode=feed.appendChild(doc.createElement("link"))
		linknode.setAttribute("rel","alternate")
		linknode.setAttribute("href",FEEDHOMELINK)
		feed.appendChild(doc.createTextNode("\n"))
	if FEEDSELFLINK:
		# rel="self": the URL at which this feed itself is served
		linknode=feed.appendChild(doc.createElement("link"))
		linknode.setAttribute("rel","self")
		linknode.setAttribute("href",FEEDSELFLINK)
		feed.appendChild(doc.createTextNode("\n"))
	if FEEDLOGO:
		appendTextualTag(feed,"logo",FEEDLOGO)
	if FEEDICON:
		appendTextualTag(feed,"icon",FEEDICON)
	if FEEDDESCRIPTION:
		appendTextualTag(feed,"subtitle",FEEDDESCRIPTION)
	appendTextualTag(feed,"updated",curtime_str)
	generator=feed.appendChild(doc.createElement("generator"))
	generator.setAttribute("uri",SCRIPT_URL)
	generator.setAttribute("version",SCRIPT_VERSION)
	generator.appendChild(doc.createTextNode("cvslog2web"))
	feed.appendChild(doc.createTextNode("\n"))

	# one <entry> per commit, truncated to the configured feed length
	for (entry,files,links) in zip(history,histFiles,histLinks)[:FEED_MAXHISTORY]:
		msg=entry[MESSAGE].splitlines()

		# entry header tags
		node=feed.appendChild(doc.createElement("entry"))
		node.appendChild(doc.createTextNode("\n"))
		t=entry[DATETIME]
		# entry id is a tag: URI; the %% escapes survive strftime so the
		# second %-formatting pass can insert DOMAIN, second-of-day and
		# microseconds afterwards
		appendTextualTag(node,"id",entry[DATETIME].strftime("tag:%%s,%Y-%m-%d:%%05d.%%06d")%(DOMAIN,t.hour*60*60+t.minute*60+t.second,t.microsecond))
		if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0:
			appendTextualTag(node,"title",FEEDENTRYPREFIX+msg[0])
		elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0:
			appendTextualTag(node,"title",FEEDENTRYPREFIX+files)
		else:
			sys.exit("cvslog2web: bad ENTRYTITLE setting")
		appendTextualTag(node,"updated",entry[TIMESTAMP])
		for n in entry[AUTHORS].itervalues():
			# n is (fullname[,email]); email is optional and may be empty
			author=node.appendChild(doc.createElement("author"))
			author.appendChild(doc.createElement("name")).appendChild(doc.createTextNode(n[0]))
			if len(n)>1 and n[1]!="":
				author.appendChild(doc.createElement("email")).appendChild(doc.createTextNode(n[1]))
			node.appendChild(doc.createTextNode("\n"))
		if PERMALINKDIR:
			# alternate link points at this entry's permalink page
			linknode=node.appendChild(doc.createElement("link"))
			linknode.setAttribute("rel","alternate")
			linknode.setAttribute("href",genPermalink(entry[DATETIME]))
			linknode.setAttribute("type","text/html")
		if FEEDRELATED:
			# one rel="related" link per file, pointing at the repository
			# browser (diff view when both versions exist, plain view otherwise)
			for ll in links.itervalues():
				for group in ll.itervalues():
					for l in group:
						linknode=node.appendChild(doc.createElement("link"))
						linknode.setAttribute("rel","related")
						linknode.setAttribute("title",l["path"])
						if l["oldversion"] and l["newversion"]:
							linknode.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
						else:
							linknode.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
						linknode.setAttribute("type","text/html")

		# CONTENT section
		content=node.appendChild(doc.createElement("content"))
		content.setAttribute("type","xhtml")
		# Atom xhtml content must be wrapped in an XHTML namespace <div>
		content=content.appendChild(doc.createElement("div"))
		content.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
		content.appendChild(doc.createTextNode("\n"))
		for i,m in enumerate(msg):
			# skip the first message line when it already serves as the title
			if i==0 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE and len(msg)==1):
				continue
			appendTextualTag(content,"div",m).setAttribute("style","padding:.25em 0;")
		# End of message embedding

		#now add links to content
		filelist=content.appendChild(doc.createElement("div"))
		if len(msg)>1:
			filelist.setAttribute("style","padding:.6em 0;")
		for pre,ll in links.iteritems():
			# pre is the status label ("Added: " etc.); ll maps a common
			# directory to the list of link-info dicts beneath it
			diffs=filelist.appendChild(doc.createElement("div"))
			diffs.setAttribute("style","padding:.25em 0;")
			diffs.appendChild(doc.createTextNode(pre))
			diffs=diffs.appendChild(doc.createElement("tt"))
			firstSet=True
			for k,group in ll.iteritems():
				if not firstSet:
					diffs.appendChild(doc.createTextNode(", "))
				# several files in one directory render as dir/{a, b, c}
				if len(group)>1 and k:
					diffs.appendChild(doc.createTextNode(os.path.join(k,"{")))
				firstLink=True
				for l in group:
					if not firstLink:
						diffs.appendChild(doc.createTextNode(", "))
					a=diffs.appendChild(doc.createElement("a"))
					if l["oldversion"] and l["newversion"]:
						a.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
					else:
						a.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
					a.appendChild(doc.createTextNode(l["filename"]))
					firstLink=False
				if len(group)>1 and k:
					diffs.appendChild(doc.createTextNode("}"))
				firstSet=False
			filelist.appendChild(doc.createTextNode("\n"))

		#end of content
		node.appendChild(doc.createTextNode("\n"))

	f=open(FEEDOUT,"wb")
	doc.writexml(f)
	f.close()
	doc.unlink()
	if VERBOSE:
		print "Feed update successful"
772 | |
773 | |
774 | ################################################################################ |
775 | ################ HTML OUTPUT ################ |
776 | ################################################################################ |
777 | |
778 | # some functions for HTML output, shared by HTMLOUT and PERMALINK output |
def HTMLHeader(dom,title,css,rootclass):
	# Build an XHTML 1.0 Transitional document skeleton: a head with the
	# given title, an optional stylesheet link, and (when the feed is
	# enabled) an Atom autodiscovery link; the body holds a single empty
	# <div> carrying class 'rootclass'.  Returns (document, that div).
	dtd=dom.createDocumentType("html","//W3C//DTD XHTML 1.0 Transitional//EN","http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd")
	doc=dom.createDocument("http://www.w3.org/1999/xhtml","html",dtd)
	html=doc.documentElement
	html.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
	html.appendChild(doc.createTextNode("\n"))
	head=html.appendChild(doc.createElement("head"))
	appendTextualTag(head,"title",title)
	if css:
		stylelink=head.appendChild(doc.createElement("link"))
		stylelink.setAttribute("rel","stylesheet")
		stylelink.setAttribute("type","text/css")
		stylelink.setAttribute("href",css)
		head.appendChild(doc.createTextNode("\n"))
	if FEEDOUT and FEEDSELFLINK:
		# advertise the Atom feed for browser autodiscovery
		feedlink=head.appendChild(doc.createElement("link"))
		feedlink.setAttribute("rel","alternate")
		feedlink.setAttribute("title","Atom Syndication Feed")
		feedlink.setAttribute("type","application/atom+xml")
		feedlink.setAttribute("href",FEEDSELFLINK)
		head.appendChild(doc.createTextNode("\n"))
	body=html.appendChild(doc.createElement("body"))
	body.appendChild(doc.createTextNode("\n"))
	outer=body.appendChild(doc.createElement("div"))
	body.appendChild(doc.createTextNode("\n"))
	outer.appendChild(doc.createTextNode("\n"))
	outer.setAttribute("class",rootclass)
	return (doc,outer)
807 | |
def appendLinks(filelist,pre,ll):
	# Render one status group ("Added: " etc.) as a <p> of hyperlinked file
	# names, collapsing several files that share a directory to dir/{a, b}.
	# ll maps a common directory to a list of link-info dicts.
	para=filelist.appendChild(doc.createElement("p"))
	appendTextualTag(para,"span",pre).setAttribute("class","cvslog2web_filestatus")
	for seti,(d,group) in enumerate(ll.iteritems()):
		if seti>0:
			para.appendChild(doc.createTextNode(", "))
		braced=len(group)>1 and d
		if braced:
			para.appendChild(doc.createTextNode(os.path.join(d,"{")))
		for linki,l in enumerate(group):
			if linki>0:
				para.appendChild(doc.createTextNode(", "))
			a=para.appendChild(doc.createElement("a"))
			if HTMLOUT_TARGET:
				a.setAttribute("target",HTMLOUT_TARGET)
			# diff link when both revisions exist, plain view otherwise
			if l["oldversion"] and l["newversion"]:
				a.setAttribute("href",(DIFFLINKFORMAT % l).replace(" ","%20"))
			else:
				a.setAttribute("href",(VIEWLINKFORMAT % l).replace(" ","%20"))
			a.appendChild(doc.createTextNode(l["filename"]))
		if braced:
			para.appendChild(doc.createTextNode("}"))
834 | |
835 | ################ HTML STANDALONE OUTPUT ################ |
836 | |
if HTMLOUT:
	# Generate the HTML commit listing: either a complete standalone page or
	# an embeddable <div> fragment, per HTMLOUT_STANDALONE.
	dom=xml.dom.getDOMImplementation()
	if not HTMLOUT_STANDALONE:
		if "createDocumentFragment" in dir(dom):
			doc=dom.createDocumentFragment()
		else:
			# minidom's DOMImplementation lacks createDocumentFragment; use a
			# bare document and strip its XML declaration when writing below
			doc=dom.createDocument(None,None,None)
		rootdiv=doc.appendChild(doc.createElement("div"))
		rootdiv.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
		rootdiv.appendChild(doc.createTextNode("\n"))
		rootdiv.setAttribute("class","cvslog2web_index")
	else:
		doc,rootdiv=HTMLHeader(dom,HTMLTITLE,HTMLOUT_CSS,"cvslog2web_index")

	# one <div> per commit, truncated to the configured listing length
	for (entry,files,links) in zip(history,histFiles,histLinks)[:HTMLOUT_MAXHISTORY]:
		msg=entry[MESSAGE].splitlines()

		# entry header tags
		node=rootdiv.appendChild(doc.createElement("div"))
		node.setAttribute("class",HTMLOUT_OUTERCLASS)
		node.appendChild(doc.createTextNode("\n"))

		# emit the configured sections in user-specified order
		for section in HTMLOUT_ORDER:
			if section==HO_TITLE:
				# Title section
				titlenode=node.appendChild(doc.createElement("div"))
				titlenode.setAttribute("class","cvslog2web_title")
				if PERMALINKDIR:
					# the title doubles as a link to the entry's permalink page
					a=appendTextualTag(titlenode,"a",HTMLOUT_ENTRYPREFIX)
					if HTMLOUT_TARGET:
						a.setAttribute("target",HTMLOUT_TARGET)
					a.setAttribute("href",genPermalink(entry[DATETIME]))
					if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0: appendTextualTag(a,"span",msg[0]).setAttribute("class","cvslog2web_message")
					elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0: appendTextualTag(a,"span",files).setAttribute("class","cvslog2web_filelist")
					else: sys.exit("cvslog2web: bad ENTRYTITLE setting")
				else:
					titlenode.appendChild(doc.createTextNode(HTMLOUT_ENTRYPREFIX))
					# NOTE(review): unlike the permalink branch above, this
					# branch does not guard len(msg)>0 before using msg[0]
					if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE: appendTextualTag(titlenode,"span",msg[0]).setAttribute("class","cvslog2web_message")
					elif ENTRYTITLE==TITLE_FILE_LIST: appendTextualTag(titlenode,"span",files).setAttribute("class","cvslog2web_filelist")
					else: sys.exit("cvslog2web: bad ENTRYTITLE setting")
				titlenode.appendChild(doc.createTextNode(" "))

			elif section==HO_AUTHORS:
				# Authors section (appended into the title div, so HO_TITLE is
				# assumed to appear earlier in HTMLOUT_ORDER)
				authors=titlenode.appendChild(doc.createElement("span"))
				authors.setAttribute("class","cvslog2web_authors")
				if HTMLOUT_AUTHORSPREFIX:
					authorsprefix=authors.appendChild(doc.createElement("span"))
					authorsprefix.setAttribute("class","cvslog2web_authorsprefix")
					authorsprefix.appendChild(doc.createTextNode(HTMLOUT_AUTHORSPREFIX))
					authors.appendChild(doc.createTextNode(",".join([k for k,n in entry[AUTHORS].iteritems()])))
				else:
					# no prefix configured: parenthesize the name list instead
					authors.appendChild(doc.createTextNode("("+",".join([k for k,n in entry[AUTHORS].iteritems()])+")"))

			elif section==HO_TIMESTAMP:
				# Commit date and time; entry[DATETIME] is shifted by the local
				# zone offset.  NOTE(review): time.daylight only says the zone
				# *has* DST, not that DST applies to this particular timestamp.
				if time.daylight:
					t=entry[DATETIME]-datetime.timedelta(seconds=time.altzone)
					tz=time.tzname[1]
				else:
					t=entry[DATETIME]-datetime.timedelta(seconds=time.timezone)
					tz=time.tzname[0]
				timestampstr=t.strftime(HTMLOUT_TIMESTAMPFORMAT+" "+tz)
				timestamp=node.appendChild(doc.createElement("div"))
				timestamp.setAttribute("class","cvslog2web_timestamp")
				if HTMLOUT_TIMESTAMPPREFIX:
					timestampprefix=timestamp.appendChild(doc.createElement("span"))
					timestampprefix.setAttribute("class","cvslog2web_timestampprefix")
					timestampprefix.appendChild(doc.createTextNode(HTMLOUT_TIMESTAMPPREFIX))
				timestamp.appendChild(doc.createTextNode(timestampstr))

			elif section==HO_MESSAGE:
				# Content section
				if HTMLOUT_MESSAGE:
					content=node.appendChild(doc.createElement("div"))
					content.setAttribute("class","cvslog2web_message")
					content.appendChild(doc.createTextNode("\n"))
					for i,m in enumerate(msg):
						# skip the first line when it already serves as the title
						if i==0 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE and len(msg)==1):
							continue
						appendTextualTag(content,"p",m)
					# remove the message div again if every line was skipped
					if len(msg)==1 and ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and (REPEAT_FIRST_LINE==REPEAT_NEVER or REPEAT_FIRST_LINE==REPEAT_WHEN_MULTIPLE):
						node.removeChild(content)

			elif section==HO_FILELIST:
				# redundant when the file list is already the entry title
				if len(links)>0 and HTMLOUT_FILELIST and ENTRYTITLE!=TITLE_FILE_LIST:
					#now add links to content
					filelist=node.appendChild(doc.createElement("div"))
					filelist.setAttribute("class","cvslog2web_filelist")
					filelist.appendChild(doc.createTextNode("\n"))
					for pre,l in links.iteritems():
						appendLinks(filelist,pre,l)

		#end of content
		node.appendChild(doc.createTextNode("\n"))

	# credit footer
	gen=rootdiv.appendChild(doc.createElement("div"))
	gen.setAttribute("class","cvslog2web_credit")
	gen.appendChild(doc.createTextNode("Generated by "))
	a=appendTextualTag(gen,"a","cvslog2web")
	a.setAttribute("target","_top")
	a.setAttribute("href",SCRIPT_URL)
	gen.appendChild(doc.createTextNode(SCRIPT_VERSION))

	f=open(HTMLOUT,"wb")
	if HTMLOUT_STANDALONE or "createDocumentFragment" in dir(dom):
		doc.writexml(f)
	else:
		# createDocumentFragment unavailable, hack it and strip the xml processing instruction
		s=doc.toxml()
		s=s[s.find("\n")+1:]
		f.write(s)
	f.close()
	doc.unlink()
	if VERBOSE:
		print "HTML update successful"
953 | |
954 | |
955 | ################ HTML PERMALINK OUTPUT ################ |
956 | |
def normalizeLink(link):
	# Make a site-relative link usable from within a permalink page.  With an
	# absolute URL prefix configured, links work as-is; otherwise the pages
	# sit several directories below the site root, so climb back up with one
	# "../" per level of the permalink directory structure.
	if PERMALINK_URL_PREFIX:
		return link
	return "../"*PERMALINK_STRUCTURE.count("/")+link
961 | |
def permalinkStatusIcon(node,index,alt):
	# Append the index'th configured status icon as an <img> under node,
	# using the given letter as its alt-text fallback.
	img=node.appendChild(doc.createElement("img"))
	img.setAttribute("src",normalizeLink(PERMALINK_STATUSICONS[index]))
	img.setAttribute("alt",alt)
966 | |
def writePermalink(entry,files,links,prevLink="",nextLink=""):
	"""Write the standalone (or fragment) HTML page for a single commit.

	entry: one history record; files: its preformatted file-summary string;
	links: its grouped link dictionary (see makeLinks).  prevLink/nextLink:
	permalink URLs of the chronologically adjacent entries, empty string to
	render the corresponding navigation button disabled.
	"""
	if prevLink: prevLink=normalizeLink(prevLink)
	if nextLink: nextLink=normalizeLink(nextLink)

	permalink=genPermalink(entry[DATETIME])
	permafile=genPermafile(entry[DATETIME])
	permdir=os.path.dirname(permafile)
	# create the dated output directory on demand
	if os.path.exists(permdir):
		if not os.path.isdir(permdir): sys.exit("cvslog2web: file blocking PERMALINKDIR "+permdir)
	else:
		os.makedirs(permdir)

	msg=entry[MESSAGE].splitlines()

	# entry header tags
	if ENTRYTITLE==TITLE_MESSAGE_FIRST_LINE and len(msg)>0:
		title=FEEDENTRYPREFIX+msg[0]
	elif ENTRYTITLE==TITLE_FILE_LIST or len(msg)==0:
		title=FEEDENTRYPREFIX+files
	else:
		sys.exit("cvslog2web: bad ENTRYTITLE setting")

	dom=xml.dom.getDOMImplementation()
	if not PERMALINK_STANDALONE:
		# fragment output, for server-side inclusion in some other page
		if "createDocumentFragment" in dir(dom):
			doc=dom.createDocumentFragment()
		else:
			# minidom lacks createDocumentFragment; the XML declaration of
			# this bare document is stripped again when written out below
			doc=dom.createDocument(None,None,None)
		rootdiv=doc.appendChild(doc.createElement("div"))
		rootdiv.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
		rootdiv.appendChild(doc.createTextNode("\n"))
		rootdiv.setAttribute("class","cvslog2web_permalink")
	else:
		doc,rootdiv=HTMLHeader(dom,title,PERMALINK_CSS,"cvslog2web_permalink")

	node=rootdiv.appendChild(doc.createElement("div"))
	node.setAttribute("class",PERMALINK_OUTERCLASS)
	node.appendChild(doc.createTextNode("\n"))

	# emit the configured page sections in user-specified order
	for section in PERMALINK_ORDER:
		# Previous link
		if section==PL_PREVLINK:
			n=node.appendChild(doc.createElement("div"))
			if prevLink:
				n.setAttribute("class","cvslog2web_nav_prev")
				# optionally make the whole div clickable, not just the anchor
				if PERMALINK_ADDJS: n.setAttribute("onClick","window.location.href='"+prevLink+"'")
				a=n.appendChild(doc.createElement("a"))
				a.setAttribute("href",prevLink)
				a.appendChild(doc.createTextNode(PL_PREVTEXT))
			else:
				n.setAttribute("class","cvslog2web_nav_prev_disabled")
				n.appendChild(doc.createTextNode(PL_PREVTEXT))

		# Next link
		elif section==PL_NEXTLINK:
			n=node.appendChild(doc.createElement("div"))
			if nextLink:
				n.setAttribute("class","cvslog2web_nav_next")
				if PERMALINK_ADDJS: n.setAttribute("onClick","window.location.href='"+nextLink+"'")
				a=n.appendChild(doc.createElement("a"))
				a.setAttribute("href",nextLink)
				a.appendChild(doc.createTextNode(PL_NEXTTEXT))
			else:
				n.setAttribute("class","cvslog2web_nav_next_disabled")
				n.appendChild(doc.createTextNode(PL_NEXTTEXT))

		# Feed link (only when the feed is configured)
		elif section==PL_FEEDLINK and FEEDOUT and FEEDSELFLINK:
			n=node.appendChild(doc.createElement("div"))
			n.setAttribute("class","cvslog2web_feedlink")
			a=n.appendChild(doc.createElement("a"))
			a.setAttribute("href",FEEDSELFLINK)
			i=a.appendChild(doc.createElement("img"))
			i.setAttribute("src",normalizeLink(PERMALINK_IMG_PREFIX+"atom_feed.png"))
			i.setAttribute("width","84")
			i.setAttribute("height","15")
			i.setAttribute("alt","Atom")
			a.appendChild(doc.createTextNode(" "))
			i=a.appendChild(doc.createElement("img"))
			i.setAttribute("src",normalizeLink(PERMALINK_IMG_PREFIX+"feed_icon.png"))
			i.setAttribute("width","15")
			i.setAttribute("height","15")
			i.setAttribute("alt","RSS")

		# Title (Timestamp)
		elif section==PL_TIMESTAMP:
			# entry[DATETIME] is shifted by the local zone offset here.
			# NOTE(review): time.daylight only says the zone *has* DST, not
			# that DST is in effect for this particular timestamp — confirm.
			if time.daylight:
				t=entry[DATETIME]-datetime.timedelta(seconds=time.altzone)
				tz=time.tzname[1]
			else:
				t=entry[DATETIME]-datetime.timedelta(seconds=time.timezone)
				tz=time.tzname[0]
			title=t.strftime("Commited %a. %B %d, %Y at %I:%M:%S %p "+tz)
			appendTextualTag(node,"div",title).setAttribute("class","cvslog2web_timestamp")

		# Authors
		elif section==PL_AUTHORS:
			authors=node.appendChild(doc.createElement("div"))
			authors.setAttribute("class","cvslog2web_authors")
			authors.appendChild(doc.createTextNode("\nfrom "))
			first=True
			for n in entry[AUTHORS].itervalues():
				# n is (fullname[,email]); render each author as a mailto: link.
				# NOTE(review): n[1] is unguarded here, unlike the feed code,
				# which checks len(n)>1 before using the email — confirm.
				if first: first=False
				else: authors.appendChild(doc.createTextNode(", "))
				l=appendTextualTag(authors,"a",n[0])
				l.setAttribute("href","mailto:"+n[1])

		# Message
		elif section==PL_MESSAGE:
			content=node.appendChild(doc.createElement("div"))
			content.setAttribute("class","cvslog2web_message")
			for i,m in enumerate(msg):
				content.appendChild(doc.createTextNode("\n"+m))
				content.appendChild(doc.createElement("br"))
			content.appendChild(doc.createTextNode("\n"))

		# Links
		elif section==PL_FILELIST:
			# NOTE: rebinds the 'links' parameter to a DOM element from here
			# on; the grouped-links argument is no longer accessible
			links=node.appendChild(doc.createElement("div"))
			links.setAttribute("class","cvslog2web_filelist")
			links.appendChild(doc.createTextNode("\n"))
			spaths=entry[PATH].keys()
			spaths.sort()
			for path in spaths:
				info=entry[PATH][path]
				# status marker: icon when all three icons are configured,
				# otherwise a plain A/M/R letter
				status=links.appendChild(doc.createElement("span"))
				status.setAttribute("class","cvslog2web_filestatus")
				if len(PERMALINK_STATUSICONS)==3:
					if info[2]==ADDED: permalinkStatusIcon(status, 0, "A")
					elif info[2]==MODIFIED: permalinkStatusIcon(status, 1, "M")
					elif info[2]==REMOVED: permalinkStatusIcon(status, 2, "R")
					else: sys.exit("cvslog2web: bad entry[PATH] status flag")
				else:
					if info[2]==ADDED: status.appendChild(doc.createTextNode("A"))
					elif info[2]==MODIFIED: status.appendChild(doc.createTextNode("M"))
					elif info[2]==REMOVED: status.appendChild(doc.createTextNode("R"))
					else: sys.exit("cvslog2web: bad entry[PATH] status flag")
				# diff link when both revisions exist, plain view otherwise
				a=links.appendChild(doc.createElement("a"))
				lid=dict(path=path,filename=os.path.basename(path),oldversion=vers2str(info[0]),newversion=vers2str(info[1]))
				if lid["oldversion"] and lid["newversion"]:
					a.setAttribute("href",(DIFFLINKFORMAT % lid).replace(" ","%20"))
				else:
					a.setAttribute("href",(VIEWLINKFORMAT % lid).replace(" ","%20"))
				a.appendChild(doc.createTextNode(dropModules(path)))
				links.appendChild(doc.createElement("br"))
				links.appendChild(doc.createTextNode("\n"))
			if len(entry)>IMPORTEDFILES and len(entry)>IMPORTEDTAG: #just for backward compatability
				# files brought in by 'cvs import': f is (status letter, path);
				# only a view link at the imported tag is available
				for f in entry[IMPORTEDFILES]:
					status=links.appendChild(doc.createElement("span"))
					status.setAttribute("class","cvslog2web_filestatus")
					status.appendChild(doc.createTextNode(f[0]))
					a=links.appendChild(doc.createElement("a"))
					lid=dict(path=f[1],filename=os.path.basename(f[1]),oldversion="",newversion=entry[IMPORTEDTAG])
					a.setAttribute("href",(VIEWLINKFORMAT % lid).replace(" ","%20"))
					a.appendChild(doc.createTextNode(dropModules(f[1])))
					links.appendChild(doc.createElement("br"))
					links.appendChild(doc.createTextNode("\n"))

		# Special content (user defined string?)
		elif section.__class__=="".__class__:
			if not section.strip().startswith("<"):
				# doesn't start with a tag, we append as text
				node.appendChild(doc.createTextNode(section))
			else:
				# Have to parse it so we can append it
				# If we just appended it as text, all the good stuff would be escaped away
				subdoc=xml.dom.minidom.parseString(section)
				for n in subdoc.childNodes:
					node.appendChild(subdoc.removeChild(n))
				subdoc.unlink()

		#put a return in the source after each section
		node.appendChild(doc.createTextNode("\n"))

	if PERMALINK_STANDALONE:
		# credit footer sits outside the permalink div (directly in <body>)
		gen=rootdiv.parentNode.appendChild(doc.createElement("div"))
	else:
		gen=rootdiv.appendChild(doc.createElement("div"))

	gen.setAttribute("class","cvslog2web_credit")
	gen.appendChild(doc.createTextNode("Generated by "))
	a=appendTextualTag(gen,"a","cvslog2web")
	a.setAttribute("target","_top")
	a.setAttribute("href",SCRIPT_URL)
	gen.appendChild(doc.createTextNode(SCRIPT_VERSION))

	f=open(permafile,"wb")
	if PERMALINK_STANDALONE or "createDocumentFragment" in dir(dom):
		doc.writexml(f)
	else:
		# createDocumentFragment unavailable, hack it and strip the xml processing instruction
		s=doc.toxml()
		s=s[s.find("\n")+1:]
		f.write(s)
	f.close()
	doc.unlink()

	if VERBOSE:
		print "Permalink generated:", genPermalink(entry[DATETIME])
1166 | |
def genPermalinkEntry(i):
	# Permalink URL for history[i], or None when the index is out of range.
	if i>=len(history):
		return None
	return genPermalink(history[i][DATETIME])
1170 | |
1171 | if PERMALINKDIR: |
1172 | if REBUILDPERMALINKS: |
1173 | # Using this section will rewrite all the permalinks in the history (handy for development...) |
1174 | writePermalink(history[0],histFiles[0],histLinks[0],genPermalinkEntry(1)) |
1175 | for i in range(1,len(history)-1): |
1176 | writePermalink(history[i],histFiles[i],histLinks[i],genPermalinkEntry(i+1),genPermalinkEntry(i-1)) |
1177 | if len(sys.argv)<3 and len(history)>=MAXHISTORY: |
1178 | pass #don't do last permalink if it's a rebuilt -- would break link chain because we don't have previous |
1179 | elif len(droppedHistory)==0: |
1180 | i=len(history)-1 |
1181 | writePermalink(history[i],histFiles[i],histLinks[i],None,genPermalinkEntry(i-1)) |
1182 | else: |
1183 | i=len(history)-1 |
1184 | writePermalink(history[i],histFiles[i],histLinks[i],genPermalink(droppedHistory[-1][DATETIME]),genPermalinkEntry(i-1)) |
1185 | else: |
1186 | writePermalink(history[0],histFiles[0],histLinks[0],genPermalinkEntry(1)) |
1187 | curpl=genPermalinkEntry(0) |
1188 | if len(history)>1: |
1189 | writePermalink(history[1],histFiles[1],histLinks[1],genPermalinkEntry(2),curpl) |