Changesets can be listed by changeset number.
The Git repository is here.
- Revision:
- 2
- Log:
Initial import of Instiki 0.11.0 sources from a downloaded Tarball.
Instiki is a Ruby on Rails-based wiki clone.
- Author:
- adh
- Date:
- Sat Jul 22 14:54:51 +0100 2006
- Size:
- 12772 Bytes
1 | require 'fileutils' |
2 | require 'redcloth_for_tex' |
3 | require 'parsedate' |
4 | require 'zip/zip' |
5 | |
# Main controller for Instiki: handles wiki browsing, editing, searching
# and export, both across webs and within a single web/page.
class WikiController < ApplicationController

  # Cache full-page output for the read-mostly actions; the revision
  # sweeper expires these caches whenever a page is revised.
  caches_action :show, :published, :authors, :recently_revised, :list
  cache_sweeper :revision_sweeper

  # Feed and TeX/HTML export actions emit raw content, so they skip the layout.
  layout 'default', :except => [:rss_feed, :rss_with_content, :rss_with_headlines, :tex, :export_tex, :export_html]
13 | def index |
14 | if @web_name |
15 | redirect_home |
16 | elsif not @wiki.setup? |
17 | redirect_to :controller => 'admin', :action => 'create_system' |
18 | elsif @wiki.webs.length == 1 |
19 | redirect_home @wiki.webs.values.first.address |
20 | else |
21 | redirect_to :action => 'web_list' |
22 | end |
23 | end |
24 | |
25 | # Outside a single web -------------------------------------------------------- |
26 | |
27 | def authenticate |
28 | if password_check(@params['password']) |
29 | redirect_home |
30 | else |
31 | flash[:info] = password_error(@params['password']) |
32 | redirect_to :action => 'login', :web => @web_name |
33 | end |
34 | end |
35 | |
  # Renders the login form; all work happens in the template.
  def login
    # to template
  end
39 | |
40 | def web_list |
41 | @webs = wiki.webs.values.sort_by { |web| web.name } |
42 | end |
43 | |
44 | |
45 | # Within a single web --------------------------------------------------------- |
46 | |
47 | def authors |
48 | @page_names_by_author = @web.page_names_by_author |
49 | @authors = @page_names_by_author.keys.sort |
50 | end |
51 | |
  # Export every page as a standalone XHTML document (with the web's
  # stylesheet and colors inlined) bundled into a zip archive.
  def export_html
    stylesheet = File.read(File.join(RAILS_ROOT, 'public', 'stylesheets', 'instiki.css'))
    export_pages_as_zip('html') do |page|

      # Render the latest revision of each page for export.
      renderer = PageRenderer.new(page.revisions.last)
      rendered_page = <<-EOL
<!DOCTYPE html
    PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
  <title>#{page.plain_name} in #{@web.name}</title>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />

  <style type="text/css">
    h1#pageName, .newWikiWord a, a.existingWikiWord, .newWikiWord a:hover {
      color: ##{@web ? @web.color : "393" };
    }
    .newWikiWord { background-color: white; font-style: italic; }
    #{stylesheet}
  </style>
  <style type="text/css">
    #{@web.additional_style}
  </style>
</head>
<body>
  #{renderer.display_content_for_export}
  <div class="byline">
    #{page.revisions? ? "Revised" : "Created" } on #{ page.revised_at.strftime('%B %d, %Y %H:%M:%S') }
    by
    #{ UrlGenerator.new(self).make_link(page.author.name, @web, nil, { :mode => :export }) }
  </div>
</body>
</html>
      EOL
      rendered_page
    end
  end
90 | |
91 | def export_markup |
92 | export_pages_as_zip(@web.markup) { |page| page.content } |
93 | end |
94 | |
95 | def export_pdf |
96 | file_name = "#{@web.address}-tex-#{@web.revised_at.strftime('%Y-%m-%d-%H-%M-%S')}" |
97 | file_path = File.join(@wiki.storage_path, file_name) |
98 | |
99 | export_web_to_tex "#{file_path}.tex" unless FileTest.exists? "#{file_path}.tex" |
100 | convert_tex_to_pdf "#{file_path}.tex" |
101 | send_file "#{file_path}.pdf" |
102 | end |
103 | |
104 | def export_tex |
105 | file_name = "#{@web.address}-tex-#{@web.revised_at.strftime('%Y-%m-%d-%H-%M-%S')}.tex" |
106 | file_path = File.join(@wiki.storage_path, file_name) |
107 | export_web_to_tex(file_path) unless FileTest.exists?(file_path) |
108 | send_file file_path |
109 | end |
110 | |
  # Feed index page; tells the template whether full-content feeds
  # are available for this web.
  def feeds
    @rss_with_content_allowed = rss_with_content_allowed?
    # show the template
  end
115 | |
  # List the pages of a category (or the whole web), plus wanted pages
  # (linked-to but missing) and orphaned pages (not linked from anywhere).
  def list
    parse_category
    @page_names_that_are_wanted = @pages_in_category.wanted_pages
    @pages_that_are_orphaned = @pages_in_category.orphaned_pages
  end
121 | |
122 | def recently_revised |
123 | parse_category |
124 | @pages_by_revision = @pages_in_category.by_revision |
125 | @pages_by_day = Hash.new { |h, day| h[day] = [] } |
126 | @pages_by_revision.each do |page| |
127 | day = Date.new(page.revised_at.year, page.revised_at.month, page.revised_at.day) |
128 | @pages_by_day[day] << page |
129 | end |
130 | end |
131 | |
  # Full-content RSS feed; refused (403) for password-protected,
  # unpublished webs so feed readers cannot bypass the password.
  def rss_with_content
    if rss_with_content_allowed?
      render_rss(hide_description = false, *parse_rss_params)
    else
      render_text 'RSS feed with content for this web is blocked for security reasons. ' +
          'The web is password-protected and not published', '403 Forbidden'
    end
  end
140 | |
  # Headline-only RSS feed (page descriptions hidden); always allowed.
  def rss_with_headlines
    render_rss(hide_description = true, *parse_rss_params)
  end
144 | |
  # Search page titles and content for the 'query' parameter. When exactly
  # one page matches overall, jump straight to it; otherwise the template
  # shows @title_results and @results.
  # NOTE(review): the query is interpolated into a regexp unescaped, so
  # metacharacters act as wildcards and a malformed pattern raises —
  # confirm this power-user behavior is intended.
  def search
    @query = @params['query']
    @title_results = @web.select { |page| page.name =~ /#{@query}/i }.sort
    @results = @web.select { |page| page.content =~ /#{@query}/i }.sort
    all_pages_found = (@results + @title_results).uniq
    if all_pages_found.size == 1
      redirect_to_page(all_pages_found.first.name)
    end
  end
154 | |
155 | # Within a single page -------------------------------------------------------- |
156 | |
  # Abandon an edit: release the page lock and go back to viewing the page.
  def cancel_edit
    @page.unlock
    redirect_to_page(@page_name)
  end
161 | |
162 | def edit |
163 | if @page.nil? |
164 | redirect_home |
165 | elsif @page.locked?(Time.now) and not @params['break_lock'] |
166 | redirect_to :web => @web_name, :action => 'locked', :id => @page_name |
167 | else |
168 | @page.lock(Time.now, @author) |
169 | end |
170 | end |
171 | |
  # Shown when trying to edit a page someone else has locked; template only.
  def locked
    # to template
  end
175 | |
  # Renders the new-page form; all work happens in the template.
  def new
    # to template
  end
179 | |
180 | def pdf |
181 | page = wiki.read_page(@web_name, @page_name) |
182 | safe_page_name = @page.name.gsub(/\W/, '') |
183 | file_name = "#{safe_page_name}-#{@web.address}-#{@page.revised_at.strftime('%Y-%m-%d-%H-%M-%S')}" |
184 | file_path = File.join(@wiki.storage_path, file_name) |
185 | |
186 | export_page_to_tex("#{file_path}.tex") unless FileTest.exists?("#{file_path}.tex") |
187 | # NB: this is _very_ slow |
188 | convert_tex_to_pdf("#{file_path}.tex") |
189 | send_file "#{file_path}.pdf" |
190 | end |
191 | |
192 | def print |
193 | if @page.nil? |
194 | redirect_home |
195 | end |
196 | @link_mode ||= :show |
197 | @renderer = PageRenderer.new(@page.revisions.last) |
198 | # to template |
199 | end |
200 | |
201 | def published |
202 | if not @web.published? |
203 | render(:text => "Published version of web '#{@web_name}' is not available", :status => 404) |
204 | return |
205 | end |
206 | |
207 | page_name = @page_name || 'HomePage' |
208 | page = wiki.read_page(@web_name, page_name) |
209 | render(:text => "Page '#{page_name}' not found", status => 404) and return unless page |
210 | |
211 | @renderer = PageRenderer.new(page.revisions.last) |
212 | end |
213 | |
  # Show a specific historical revision (diff view when mode=diff).
  def revision
    get_page_and_revision
    @show_diff = (@params[:mode] == 'diff')
    @renderer = PageRenderer.new(@revision)
  end
219 | |
  # Load an old revision into the edit form so it can be saved as the
  # newest revision.
  def rollback
    get_page_and_revision
  end
223 | |
  # Persist an edited or brand-new page from the submitted form.
  # Remembers the author name in a long-lived cookie, runs the spam
  # filter, then either revises the existing page or creates a new one.
  # On any failure the content is stashed in the flash so the edit/new
  # form can be re-rendered without losing the user's text.
  def save
    render(:status => 404, :text => 'Undefined page name') and return if @page_name.nil?

    author_name = @params['author']
    # Blank author names fall back to the traditional wiki pseudonym.
    author_name = 'AnonymousCoward' if author_name =~ /^\s*$/
    cookies['author'] = { :value => author_name, :expires => Time.utc(2030) }

    begin
      # Raises if the content matches a configured spam pattern.
      filter_spam(@params['content'])
      if @page
        wiki.revise_page(@web_name, @page_name, @params['content'], Time.now,
            Author.new(author_name, remote_ip), PageRenderer.new)
        @page.unlock
      else
        wiki.write_page(@web_name, @page_name, @params['content'], Time.now,
            Author.new(author_name, remote_ip), PageRenderer.new)
      end
      redirect_to_page @page_name
    rescue => e
      flash[:error] = e
      logger.error e
      # Preserve the user's submission for the re-rendered form.
      flash[:content] = @params['content']
      if @page
        @page.unlock
        redirect_to :action => 'edit', :web => @web_name, :id => @page_name
      else
        redirect_to :action => 'new', :web => @web_name, :id => @page_name
      end
    end
  end
254 | |
  # Display the latest revision of a page. A rendering error inside a web
  # sends the user to the edit form (so a broken page can be repaired);
  # a missing page redirects to the new-page form.
  def show
    if @page
      begin
        @renderer = PageRenderer.new(@page.revisions.last)
        @show_diff = (@params[:mode] == 'diff')
        render_action 'page'
      # TODO this rescue should differentiate between errors due to rendering and errors in
      # the application itself (for application errors, it's better not to rescue the error at all)
      rescue => e
        logger.error e
        flash[:error] = e.message
        if in_a_web?
          redirect_to :action => 'edit', :web => @web_name, :id => @page_name
        else
          raise e
        end
      end
    else
      if not @page_name.nil? and not @page_name.empty?
        redirect_to :web => @web_name, :action => 'new', :id => @page_name
      else
        render_text 'Page name is not specified', '404 Not Found'
      end
    end
  end
280 | |
  # Convert the current page's markup to TeX for the wiki/tex template.
  def tex
    @tex_content = RedClothForTex.new(@page.content).to_tex
  end
284 | |
  protected

  # In addition to whatever the superclass sets up, load the page named
  # by params['id'] into @page (left nil when no id was given).
  def connect_to_model
    super
    @page_name = @params['id']
    @page = @wiki.read_page(@web_name, @page_name) if @page_name
  end
292 | |
293 | private |
294 | |
295 | def convert_tex_to_pdf(tex_path) |
296 | # TODO remove earlier PDF files with the same prefix |
297 | # TODO handle gracefully situation where pdflatex is not available |
298 | begin |
299 | wd = Dir.getwd |
300 | Dir.chdir(File.dirname(tex_path)) |
301 | logger.info `pdflatex --interaction=nonstopmode #{File.basename(tex_path)}` |
302 | ensure |
303 | Dir.chdir(wd) |
304 | end |
305 | end |
306 | |
  # Render the current page as TeX (via the tex action's setup) and
  # write the result to file_path.
  def export_page_to_tex(file_path)
    tex
    File.open(file_path, 'w') { |f| f.write(render_to_string(:template => 'wiki/tex', :layout => nil)) }
  end
311 | |
  # Build (or rebuild) a zip archive named <web>-<type>-<timestamp>.zip
  # in the wiki storage path, containing one entry per page whose content
  # is produced by the given block, then stream it to the client.
  def export_pages_as_zip(file_type, &block)

    file_prefix = "#{@web.address}-#{file_type}-"
    timestamp = @web.revised_at.strftime('%Y-%m-%d-%H-%M-%S')
    file_path = File.join(@wiki.storage_path, file_prefix + timestamp + '.zip')
    # Write to a temp file first so a partially-built archive is never served.
    tmp_path = "#{file_path}.tmp"

    Zip::ZipOutputStream.open(tmp_path) do |zip_out|
      @web.select.by_name.each do |page|
        zip_out.put_next_entry("#{CGI.escape(page.name)}.#{file_type}")
        zip_out.puts(block.call(page))
      end
      # add an index file, if exporting to HTML
      if file_type.to_s.downcase == 'html'
        zip_out.put_next_entry 'index.html'
        zip_out.puts "<html><head>" +
            "<META HTTP-EQUIV=\"Refresh\" CONTENT=\"0;URL=HomePage.#{file_type}\"></head></html>"
      end
    end
    # Drop archives from earlier revisions before publishing the new one.
    FileUtils.rm_rf(Dir[File.join(@wiki.storage_path, file_prefix + '*.zip')])
    FileUtils.mv(tmp_path, file_path)
    send_file file_path
  end
335 | |
  # Render the whole web as one TeX document (with a table of contents
  # derived from HomePage) and write it to file_path.
  def export_web_to_tex(file_path)
    @tex_content = table_of_contents(@web.page('HomePage').content, render_tex_web)
    File.open(file_path, 'w') { |f| f.write(render_to_string(:template => 'wiki/tex_web', :layout => nil)) }
  end
340 | |
  # Load @revision_number from the 'rev' parameter and the matching
  # revision (1-based) into @revision.
  # NOTE(review): a missing or zero 'rev' yields index -1, i.e. the
  # *last* revision — confirm this fallback is intentional.
  def get_page_and_revision
    @revision_number = @params['rev'].to_i
    @revision = @page.revisions[@revision_number - 1]
  end
345 | |
  # Resolve the optional 'category' request parameter into
  # @pages_in_category (a PageSet) plus @set_name/@category/@categories
  # for the listing templates.
  def parse_category
    @categories = WikiReference.list_categories.sort
    @category = @params['category']
    if @category
      @set_name = "category '#{@category}'"
      pages = WikiReference.pages_in_category(@category).sort.map { |page_name| @web.page(page_name) }
      @pages_in_category = PageSet.new(@web, pages)
    else
      # no category specified, return all pages of the web
      @pages_in_category = @web.select_all.by_name
      @set_name = 'the web'
    end
  end
359 | |
360 | def parse_rss_params |
361 | if @params.include? 'limit' |
362 | limit = @params['limit'].to_i rescue nil |
363 | limit = nil if limit == 0 |
364 | else |
365 | limit = 15 |
366 | end |
367 | start_date = Time.local(*ParseDate::parsedate(@params['start'])) rescue nil |
368 | end_date = Time.local(*ParseDate::parsedate(@params['end'])) rescue nil |
369 | [ limit, start_date, end_date ] |
370 | end |
371 | |
372 | def remote_ip |
373 | ip = @request.remote_ip |
374 | logger.info(ip) |
375 | ip |
376 | end |
377 | |
  # Shared implementation for both RSS variants. With only a limit, take
  # the N most recently revised pages; with a date window, filter the
  # full revision-ordered list instead.
  def render_rss(hide_description = false, limit = 15, start_date = nil, end_date = nil)
    if limit && !start_date && !end_date
      @pages_by_revision = @web.select.by_revision.first(limit)
    else
      @pages_by_revision = @web.select.by_revision
      @pages_by_revision.reject! { |page| page.revised_at < start_date } if start_date
      @pages_by_revision.reject! { |page| page.revised_at > end_date } if end_date
    end

    @hide_description = hide_description
    # Password-protected webs link to the published view so readers
    # without credentials can still follow feed links.
    @link_action = @web.password ? 'published' : 'show'

    render :action => 'rss_feed'
  end
392 | |
393 | def render_tex_web |
394 | @web.select.by_name.inject({}) do |tex_web, page| |
395 | tex_web[page.name] = RedClothForTex.new(page.content).to_tex |
396 | tex_web |
397 | end |
398 | end |
399 | |
400 | def rss_with_content_allowed? |
401 | @web.password.nil? or @web.published? |
402 | end |
403 | |
404 | def truncate(text, length = 30, truncate_string = '...') |
405 | if text.length > length then text[0..(length - 3)] + truncate_string else text end |
406 | end |
407 | |
  # Raise if the submitted content matches any configured spam pattern.
  # Patterns are cached in a class variable after the first load (shared
  # across all controller instances for the life of the process).
  def filter_spam(content)
    @@spam_patterns ||= load_spam_patterns
    @@spam_patterns.each do |pattern|
      raise "Your edit was blocked by spam filtering" if content =~ pattern
    end
  end
414 | |
415 | def load_spam_patterns |
416 | spam_patterns_file = "#{RAILS_ROOT}/config/spam_patterns.txt" |
417 | if File.exists?(spam_patterns_file) |
418 | File.readlines(spam_patterns_file).inject([]) { |patterns, line| patterns << Regexp.new(line.chomp, Regexp::IGNORECASE) } |
419 | else |
420 | [] |
421 | end |
422 | end |
423 | |
424 | end |