Commits

Anonymous committed a5457b0

renamed to resimar
changed version to 0.2.0
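The rename does not change the public entry point's signature; callers only swap the module and function name, exactly as the updated tests below do. A minimal before/after sketch (the sample text is made up):

    # 0.1.x
    from simple_markup import simple_markup
    html = simple_markup(u'some **bold** text', tidy=False)

    # 0.2.0
    from resimar import resimar
    html = resimar(u'some **bold** text', tidy=False)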

Files changed (3)

resimar.py

+# -*- coding: utf-8 -*-
+
+__author__ = "Christian Kaula <chris@christiankaula.com>"
+__version__ = "0.2.0"
+# __url__ = "http://wiki.chad.org/SmartyPantsPy"
+__description__ = "REallySImpleMARkup for stuff for which BBCode would be overkill."
+
+import hashlib
+import re
+import tidy
+
+class Block(object):
+	"""
+	defines one block of rsm text
+	"""
+	
+	def __init__(self, content, meta=None):
+		self.meta = meta
+		self.content = content
+		
+		# array in which we keep urls
+		self.links = []
+		
+	def nl2br(self):
+		self.content = self.content.replace('\n', '<br />\n')
+
+
+	def create_nice_url(self, match):
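+		# swap the raw url for a $md5$ placeholder so the inline-markup pass cannot mangle it; resub_links() puts the real link back in afterwards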
+		if len(match.group(0)) > 50:
+			name = match.group(0)[0:50] + u'[...]'
+		else:
+			name = match.group(0)
+		link = u'<a href="' + match.group(0) + '">' + name + '</a>'
+		link_hash = hashlib.md5(link).hexdigest()
+		self.links.append((link, link_hash))
+		return '$' + link_hash + '$'
+
+	def parse_urls(self):
+		# props to Oz http://regexlib.com/REDetails.aspx?regexp_id=1719
+		# with changes of mine
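+		# matches scheme, host (optionally with one subdomain) and tld, plus an optional path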
+		url_re =  re.compile(r"(http://|https://|ftp://)([a-zA-Z0-9]+\.[a-zA-Z0-9\-]+|[a-zA-Z0-9\-]+)\.[a-zA-Z\.]{2,6}(/[a-zA-Z0-9\.\?=/#%&\+-_]+|/|)")
+		self.content = url_re.sub(self.create_nice_url, self.content)
+
+
+	def create_nice_email_url(self, match):
+		link = u'<a href="mailto:' + match.group(0) + '">' + match.group(0) + '</a>'
+		link_hash = hashlib.md5(link).hexdigest()
+		self.links.append((link, link_hash))
+		return '$' + link_hash + '$'
+
+	def parse_emails(self):
+		# props to Roger Ramjet http://regexlib.com/REDetails.aspx?regexp_id=328
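+		# accepts quoted or dotted local parts and either an ip address (bracketed or bare) or a domain name as host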
+		email_re =  re.compile(r"(((\"[^\"\f\n\r\t\v\b]+\")|([\w\!\#\$\%\&\'\*\+\-\~\/\^\`\|\{\}]+(\.[\w\!\#\$\%\&\'\*\+\-\~\/\^\`\|\{\}]+)*))@((\[(((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9])))\])|(((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9])))|((([A-Za-z0-9\-])+\.)+[A-Za-z\-]+)))")
+		self.content = email_re.sub(self.create_nice_email_url, self.content)
+
+	
+	def resub_links(self):
+		"""get back in the urls we replaced with a safe string earlier"""
+		for link, link_hash in self.links:
+			# print u'$' + link_hash + u'$', link
+			self.content = self.content.replace(u'$' + link_hash + u'$', link)
+
+
+	def parse_short_markup(self):
+		"""parses short markup like _this_"""
+		b_re = re.compile(r'\*(?P<content>[\w\d]+?)\*', re.UNICODE)
+		self.content = b_re.sub('<b>\g<content></b>', self.content)
+		i_re = re.compile(r'/(?P<content>[\w\d]+?)/', re.UNICODE)
+		self.content = i_re.sub('<i>\g<content></i>', self.content)
+		s_re = re.compile(r'-(?P<content>[\w\d]+?)-', re.UNICODE)
+		self.content = s_re.sub('<del>\g<content></del>', self.content)
+		u_re = re.compile(r'_(?P<content>[\w\d]+?)_', re.UNICODE)
+		self.content = u_re.sub('<u>\g<content></u>', self.content)
+
+
+	def parse_long_markup(self):
+		db_re = re.compile(r'\*\*(?P<content>.*?)\*\*', re.UNICODE)
+		self.content = db_re.sub('<b>\g<content></b>', self.content)
+		di_re = re.compile(r'//(?P<content>.*?)//', re.UNICODE)
+		self.content = di_re.sub('<i>\g<content></i>', self.content)
+		ds_re = re.compile(r'--(?P<content>.*?)--', re.UNICODE)
+		self.content = ds_re.sub('<del>\g<content></del>', self.content)
+		du_re = re.compile(r'__(?P<content>.*?)__', re.UNICODE)
+		self.content = du_re.sub('<u>\g<content></u>', self.content)
+ 
+	
+	def parse(self):
+		# self.sanitize()
+		self.parse_urls()
+		self.parse_emails()
+		# self.parse_short_markup()
+		self.parse_long_markup()
+		# self.brake_long_words()
+		self.resub_links()
+
+
+class ParagraphBlock(Block):
+	def parse(self):
+		super(ParagraphBlock, self).parse()
+		self.nl2br()
+		# BUG: call nl2br somewhere appropriate
+		return u'<p>\n%s\n</p>' % self.content
+
+
+class CodeBlock(Block):
+	def parse(self):
+		super(CodeBlock, self).parse()
+		endcode_re = re.compile(r'\n:code\s*$')
+		self.content = endcode_re.sub('', self.content)
+		output = u'<code>\n%s\n</code>' % self.content
+		if self.meta:
+			output = u'<div class="code-meta">%s</div>\n' % self.meta + output
+		return output
+		
+
+class QuoteBlock(Block):
+	def parse(self):
+		super(QuoteBlock, self).parse()
+		# BUG: call nl2br somewhere appropriate
+		endquote_re = re.compile(r'\n:quote\s*$')
+		self.content = endquote_re.sub('', self.content)
+		output = u'<p>\n%s\n</p>' % self.content
+		if self.meta:
+			output = u'<div class="quote-meta">\n%s\n</div>\n' % self.meta + output
+		return u'<blockquote>\n%s\n</blockquote>' % output
+	
+
+class ListBlock(Block):
+	def parse_list(self):
+		li_re = re.compile(r'(?:^|\n)\s*(?:\*|-|#)\s+', re.UNICODE)
+		lis = li_re.split(self.content)
+		output = []
+		for li in lis:
+			if not li.strip(' \n\r') == '':
+				output.append('<li>\n  %s\n</li>' % li.strip(' \n\r').replace('\n', '<br />\n'))
+		self.content = '\n'.join(output)
+	
+	def parse(self):
+		self.parse_list()
+		super(ListBlock, self).parse()
+
+
+class OrderedListBlock(ListBlock):
+	def parse(self):
+		super(OrderedListBlock, self).parse()
+		output = u'<ol>\n%s\n</ol>' % self.content
+		return output
+		
+
+class UnorderedListBlock(ListBlock):
+	def parse(self):
+		super(UnorderedListBlock, self).parse()
+		output = u'<ul>\n%s\n</ul>' % self.content
+		return output
+
+
+# TODO: make possible to escape stuff
+# TODO: trim too long words/links
+# TODO: make urls nameable
+# TODO: think of something for _stuff_ cd-rom-drive
+class ResimarText(object):
+	"""
+	instance of a text that gets parsed by resimar
+
+	content: the (sanitized) input text
+	blocks: the Block objects the text is split into
+	parse() returns the final html
+	"""
+	
+	def __init__(self, input, tidy=True):
+		# remember whether the generated html should be run through tidy
+		self.use_tidy = tidy
+		if input is None:
+			self.content = u''
+			return
+			
+		else:
+			# make sure input is unicode
+			if not isinstance(input, unicode):
+				input = input.decode('utf8')
+			self.content = input
+		
+		self.blocks = []
+		self.code_open = 0
+		self.quote_open = 0
+		
+		# self.input = input
+		# self.content = self.input
+
+
+	def brake_long_word(self, match):
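+		# placeholder for the "trim too long words/links" todo above: long words are found but returned unchanged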
+		print match.group(0)[::30]
+		return match.group(0)
+	
+	def sanitize(self):
+		"""
+		gets rid of tags by replacing < and > with the corresponding html entities,
+		strips tabs, normalizes carriage returns to plain newlines
+		and collapses runs of blank lines and stray leading spaces
+		"""
+		# tags have to go first
+		self.content = self.content.replace('<', '&lt;')
+		self.content = self.content.replace('>', '&gt;')
+		# remove tabs
+		self.content = self.content.replace('\t', '')
+		# normalize linebreaks
+		nl_re = re.compile(r'(\r\n|\r)', re.UNICODE)
+		self.content = nl_re.sub('\n', self.content)
+		# now we get rid of messy linebreaks
+		br_re = re.compile(r'(\n){3,}', re.UNICODE)
+		self.content = br_re.sub('\n\n', self.content)
+		# remove messy spaces
+		space_re = re.compile(r'(\n+) +', re.UNICODE)
+		# print space_re.findall(self.content)
+		self.content = space_re.sub(r'\1', self.content)
+		# break long words
+		long_re = re.compile(r'[a-zA-Z0-9]{50,}', re.UNICODE)
+		self.content = long_re.sub(self.brake_long_word, self.content)
+
+	
+	def split_blocks(self):
+		"""
+		a block is a paragraph of text delimited by a blank line (two newlines);
+		split the text into blocks and turn each one into the right Block object
+		"""
+		for blockstring in self.content.strip(' \n\r').split('\n\n'):
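+			# blocks that start with "word:" (e.g. code: or quote:) carry their type and optional meta on the first line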
+			type_re = re.compile(r'^(?P<type>\w+):(?P<meta>.*?)\n(?P<content>.*)', re.DOTALL | re.UNICODE)
+			match = type_re.match(blockstring)
+			
+			if match:
+				if match.group('type'):
+					blocktype = match.group('type').lower()
+				else:
+					blocktype = None
+				if match.group('meta'):
+					blockmeta = match.group('meta')
+				else:
+					blockmeta = None
+			
+			blockident = blockstring[0:2]
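+			# the first two characters decide between list blocks and a plain paragraph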
+			# print self.content
+			# print 'bs', blockstring
+			# print 'bi', blockident
+
+			if match and blocktype and blocktype in ('code', 'quote'):
+				if blocktype == 'code':
+					self.blocks.append(CodeBlock(match.group('content'), blockmeta))
+				elif blocktype == 'quote':
+					self.blocks.append(QuoteBlock(match.group('content'), blockmeta))
+					
+			elif blockident == '* ' or blockident == '- ':
+				self.blocks.append(UnorderedListBlock(content=blockstring))
+			elif blockident == '# ':
+				self.blocks.append(OrderedListBlock(content=blockstring))
+			else:
+				self.blocks.append(ParagraphBlock(blockstring))
+
+
+	def tidy(self):
+		# if isinstance(self.content, basestring):
+		# 	tidy_input = self.content
+		# elif not isinstance(self.content, basestring):
+		# 	try:
+		# 		tidy_input = str(self.content)
+		# 	except UnicodeEncodeError:
+		# 		tidy_input = unicode(self.content)
+
+		output = tidy.parseString(self.content.encode('utf8'), char_encoding='utf8', indent=True, output_xhtml=True, show_body_only=True).__str__().decode('utf8')
+		if not isinstance(output, basestring):
+			if not isinstance(output, unicode):
+				output = output.decode('utf8')
+			else:
+				output = unicode(str(output), 'utf8')
+		self.content = output
+
+
+	def parse(self):
+		if self.content == u'':
+			return u''
+			
+		self.sanitize()
+		self.split_blocks()
+
+		output = ''
+		for block in self.blocks:
+			output = output + block.parse() + '\n'
+
+		if self.use_tidy:
+			self.content = output
+			self.tidy()
+			output = self.content
+
+		return output
+	
+
+def resimar(text, **kwargs):
+	# print [ResimarText(text, **kwargs).output]
+	return ResimarText(text, **kwargs).parse()
+
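Taken together, the block classes above define the mapping from resimar markup to HTML: blank lines separate blocks, code:/:code and quote:/:quote fence code and quote blocks, a leading "* ", "- " or "# " starts a list, and doubled **, //, -- and __ mark bold, italic, strikethrough and underline. A rough sketch of the output shape (exact whitespace depends on whether the tidy pass runs):

    from resimar import resimar

    text = u'**bold** and //italic//\n\n* first item\n* second item'
    print(resimar(text, tidy=False))
    # roughly:
    # <p><b>bold</b> and <i>italic</i></p>
    # <ul><li>first item</li><li>second item</li></ul>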

simple_markup.py

-# -*- coding: utf-8 -*-
-
-__author__ = "Christian Kaula <chris@christiankaula.com>"
-__version__ = "0.1.3"
-# __url__ = "http://wiki.chad.org/SmartyPantsPy"
-__description__ = "Simple Markup for stuff for which BBCode would be overkill."
-
-import hashlib
-import re
-import tidy
-
-class Block(object):
-	"""
-	defines one block of rsm text
-	"""
-	
-	def __init__(self, content, meta=None):
-		self.meta = meta
-		self.content = content
-		
-		# array in which we keep urls
-		self.links = []
-		
-	def nl2br(self):
-		self.content = self.content.replace('\n', '<br />\n')
-
-
-	def create_nice_url(self, match):
-		if match.group(0).__len__() > 50:
-			name = match.group(0)[0:50] + u'[...]'
-		else:
-			name = match.group(0)
-		link = u'<a href="' + match.group(0) + '">' + name + '</a>'
-		link_hash = hashlib.md5(link).hexdigest()
-		self.links.append((link, link_hash))
-		return '$' + link_hash + '$'
-
-	def parse_urls(self):
-		# probs to Oz http://regexlib.com/REDetails.aspx?regexp_id=1719
-		# with changes of mine
-		url_re =  re.compile(r"(http://|https://|ftp://)([a-zA-Z0-9]+\.[a-zA-Z0-9\-]+|[a-zA-Z0-9\-]+)\.[a-zA-Z\.]{2,6}(/[a-zA-Z0-9\.\?=/#%&\+-_]+|/|)")
-		self.content = url_re.sub(self.create_nice_url, self.content)
-
-
-	def create_nice_email_url(self, match):
-		link = u'<a href="mailto:' + match.group(0) + '">' + match.group(0) + '</a>'
-		link_hash = hashlib.md5(link).hexdigest()
-		self.links.append((link, link_hash))
-		return '$' + link_hash + '$'
-
-	def parse_emails(self):
-		# probs to Roger Ramjet http://regexlib.com/REDetails.aspx?regexp_id=328
-		email_re =  re.compile(r"(((\"[^\"\f\n\r\t\v\b]+\")|([\w\!\#\$\%\&\'\*\+\-\~\/\^\`\|\{\}]+(\.[\w\!\#\$\%\&\'\*\+\-\~\/\^\`\|\{\}]+)*))@((\[(((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9])))\])|(((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|([0-1]?[0-9]?[0-9])))|((([A-Za-z0-9\-])+\.)+[A-Za-z\-]+)))")
-		self.content = email_re.sub(self.create_nice_email_url, self.content)
-
-	
-	def resub_links(self):
-		"""get back in the urls we replaced with a safe string earlier"""
-		for link, link_hash in self.links:
-			# print u'$' + link_hash + u'$', link
-			self.content = self.content.replace(u'$' + link_hash + u'$', link)
-
-
-	def parse_short_markup(self):
-		"""parses short markup like _this_"""
-		b_re = re.compile(r'\*(?P<content>[\w\d]+?)\*', re.UNICODE)
-		self.content = b_re.sub('<b>\g<content></b>', self.content)
-		i_re = re.compile(r'/(?P<content>[\w\d]+?)/', re.UNICODE)
-		self.content = i_re.sub('<i>\g<content></i>', self.content)
-		s_re = re.compile(r'-(?P<content>[\w\d]+?)-', re.UNICODE)
-		self.content = s_re.sub('<del>\g<content></del>', self.content)
-		u_re = re.compile(r'_(?P<content>[\w\d]+?)_', re.UNICODE)
-		self.content = u_re.sub('<u>\g<content></u>', self.content)
-
-
-	def parse_long_markup(self):
-		db_re = re.compile(r'\*\*(?P<content>.*?)\*\*', re.UNICODE)
-		self.content = db_re.sub('<b>\g<content></b>', self.content)
-		di_re = re.compile(r'//(?P<content>.*?)//', re.UNICODE)
-		self.content = di_re.sub('<i>\g<content></i>', self.content)
-		ds_re = re.compile(r'--(?P<content>.*?)--', re.UNICODE)
-		self.content = ds_re.sub('<del>\g<content></del>', self.content)
-		du_re = re.compile(r'__(?P<content>.*?)__', re.UNICODE)
-		self.content = du_re.sub('<u>\g<content></u>', self.content)
- 
-	
-	def parse(self):
-		# self.sanitize()
-		self.parse_urls()
-		self.parse_emails()
-		# self.parse_short_markup()
-		self.parse_long_markup()
-		# self.brake_long_words()
-		self.resub_links()
-
-
-class ParagraphBlock(Block):
-	def parse(self):
-		super(ParagraphBlock, self).parse()
-		self.nl2br()
-		# BUG: call nl2br somewhere appropriate
-		return u'<p>\n%s\n</p>' % self.content
-
-
-class CodeBlock(Block):
-	def parse(self):
-		super(CodeBlock, self).parse()
-		endcode_re = re.compile(r'\n:code\s*$')
-		self.content = endcode_re.sub('', self.content)
-		output = u'<code>\n%s\n</code>' % self.content
-		if self.meta:
-			output = u'<div class="code-meta">%s</div>\n' % self.meta + output
-		return output
-		
-
-class QuoteBlock(Block):
-	def parse(self):
-		super(QuoteBlock, self).parse()
-		# BUG: call nl2br somewhere appendppropriate
-		endquote_re = re.compile(r'\n:quote\s*$')
-		self.content = endquote_re.sub('', self.content)
-		output = u'<p>\n%s\n</p>' % self.content
-		if self.meta:
-			output = u'<div class="quote-meta">\n%s\n</div>\n' % self.meta + output
-		return u'<blockquote>\n%s\n</blockquote>' % output
-	
-
-class ListBlock(Block):
-	def parse_list(self):
-		li_re = re.compile(r'(?:^|\n)\s*(?:\*|-|#)\s+', re.UNICODE)
-		lis = li_re.split(self.content)
-		output = []
-		for li in lis:
-			if not li.strip(' \n\r') == '':
-				output.append('<li>\n  %s\n</li>' % li.strip(' \n\r').replace('\n', '<br />\n'))
-		self.content = '\n'.join(output)
-	
-	def parse(self):
-		content = self.parse_list()
-		super(ListBlock, self).parse()
-
-
-class OrderedListBlock(ListBlock):
-	def parse(self):
-		super(OrderedListBlock, self).parse()
-		output = u'<ol>\n%s\n</ol>' % self.content
-		return output
-		
-
-class UnorderedListBlock(ListBlock):
-	def parse(self):
-		super(UnorderedListBlock, self).parse()
-		output = u'<ul>\n%s\n</ul>' % self.content
-		return output
-
-
-# TODO: make possible to escape stuff
-# TODO: trim too long words/links
-# TODO: make urls nameable
-# TODO: think of something for _stuff_ cd-rom-drive
-class SimpleMarkupText(object):
-	"""
-	instance of a text that has been parsed by sm
-
-	input: original input
-	blocks: blocks of text
-	output: final parsed text
-	"""
-	
-	def __init__(self, input, tidy=True):
-		if input == None:
-			self.content = u''
-			return
-			
-		else:
-			# make sure input is unicode
-			if not isinstance(input, unicode):
-				input = input.decode('utf8')
-			self.content = input
-		
-		self.blocks = []
-		self.code_open = 0
-		self.quote_open = 0
-		
-		# self.input = input
-		# self.content = self.input
-
-
-	def brake_long_word(self, match):
-		print match.group(0)[::30]
-		return match.group(0)
-	
-	def sanitize(self):
-		"""
-		gets rid of tags by replacing < and > by the corresponding html entities
-		replace carriage returns and stuff that doesnt belong with newlines
-		also replaces newlines that are too much
-		"""
-		# tags have to go first
-		self.content = self.content.replace('<', '&lt;')
-		self.content = self.content.replace('>', '&gt;')
-		# remove tabs
-		self.content = self.content.replace('\t', '')
-		# normalize linebreaks
-		nl_re = re.compile(r'(\r\n|\r)', re.UNICODE)
-		self.content = nl_re.sub('\n', self.content)
-		# now we get rid of messy linebreaks
-		br_re = re.compile(r'(\n){3,}', re.UNICODE)
-		self.content = br_re.sub('\n\n', self.content)
-		# remove messy spaces
-		space_re = re.compile(r'(\n+) +', re.UNICODE)
-		# print space_re.findall(self.content)
-		self.content = space_re.sub(r'\1', self.content)
-		# break long words
-		long_re = re.compile(r'[a-zA-Z0-9]{50,}', re.UNICODE)
-		self.content = long_re.sub(self.brake_long_word, self.content)
-
-	
-	def split_blocks(self):
-		"""
-		a block is a paragraph of text defined by two newlines
-		split blocks of text and convert them into objects
-		"""
-		for blockstring in self.content.strip(' \n\r').split('\n\n'):
-			type_re = re.compile(r'^(?P<type>\w+):(?P<meta>.*?)\n(?P<content>.*)', re.DOTALL | re.UNICODE)
-			match = type_re.match(blockstring)
-			
-			if match:
-				if match.group('type'):
-					blocktype = match.group('type').lower()
-				else:
-					blocktype = None
-				if match.group('meta'):
-					blockmeta = match.group('meta')
-				else:
-					blockmeta = None
-			
-			blockident = blockstring[0:2]
-			# print self.content
-			# print 'bs', blockstring
-			# print 'bi', blockident
-
-			if match and blocktype and blocktype in ('code', 'quote'):
-				if blocktype == 'code':
-					self.blocks.append(CodeBlock(match.group('content'), blockmeta))
-				elif blocktype == 'quote':
-					self.blocks.append(QuoteBlock(match.group('content'), blockmeta))
-					
-			elif blockident == '* ' or blockident == '- ':
-				self.blocks.append(UnorderedListBlock(content=blockstring))
-			elif blockident == '# ':
-				self.blocks.append(OrderedListBlock(content=blockstring))
-			else:
-				self.blocks.append(ParagraphBlock(blockstring))
-
-
-	def tidy(self):
-		# if isinstance(self.content, basestring):
-		# 	tidy_input = self.content
-		# elif not isinstance(self.content, basestring):
-		# 	try:
-		# 		tidy_input = str(self.content)
-		# 	except UnicodeEncodeError:
-		# 		tidy_input = unicode(self.content)
-
-		output = tidy.parseString(self.content.encode('utf8'), char_encoding='utf8', indent=True, output_xhtml=True, show_body_only=True).__str__().decode('utf8')
-		if not isinstance(output, basestring,):
-			if not isinstance(output, unicode):
-				output = output.decode('utf8')
-			else:
-				output = unicode(str(output), 'utf8')
-		self.content = output
-
-
-	def parse(self):
-		if self.content == u'':
-			return u''
-			
-		self.sanitize()
-		self.split_blocks()
-
-		output = ''
-		for block in self.blocks:
-			output = output + block.parse() + '\n'
-
-		if self.tidy:
-			self.tidy()
-
-		return output
-	
-
-def simple_markup(text, **kwargs):
-	# print [SimpleMarkupText(text, **kwargs).output]
-	return SimpleMarkupText(text, **kwargs).parse()
-
-# print simple_markup(text)
-# print simple_markup(text2)
-# print simple_markup(text3)
-# print simple_markup(text4)
-# print simple_markup(text9)
-# print simple_markup(None)
-# print simple_markup('doerthe-beckert@gmx.de')
-# print simple_markup(text10)
-# print simple_markup(u"""Nulla sed eros id neque dictum fringilla.\n\nMorbi eros felis, congue nec, pharetra nec, cursus a, odio.\n\nUt pharetra tincidunt eros.""")
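Both listings above share the same trick for keeping links intact: create_nice_url and create_nice_email_url swap each matched URL or address for a $md5$ token before the inline-markup pass runs, and resub_links swaps the finished <a> tag back in afterwards, so the //, -- and __ inside a link are never misread as markup. A minimal standalone sketch of the technique (protect_links/restore_links and the regexes are simplified stand-ins, not the module's own code):

    import hashlib
    import re

    def protect_links(text, links):
        # swap each url for a $md5$ token so later substitutions cannot touch it
        def stash(match):
            link = '<a href="%s">%s</a>' % (match.group(0), match.group(0))
            token = '$' + hashlib.md5(link.encode('utf-8')).hexdigest() + '$'
            links.append((token, link))
            return token
        return re.sub(r'https?://\S+', stash, text)

    def restore_links(text, links):
        # put the finished <a> tags back in place of their tokens
        for token, link in links:
            text = text.replace(token, link)
        return text

    links = []
    text = protect_links('see http://example.com/a__b for details', links)
    text = re.sub(r'__(.*?)__', r'<u>\1</u>', text)  # the inline-markup pass
    print(restore_links(text, links))
    # the __ inside the url survives untouched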
 # -*- coding: utf-8 -*-
 import sys
 sys.path.insert(1, '.')
-from simple_markup import simple_markup
+from resimar import resimar 
 # from BeautifulSoup import BeautifulSoup
 import unittest
 
 	# =================
 	
 	def test_none(self):
-		self.failUnlessEqual(simple_markup(None), u'')
+		self.failUnlessEqual(resimar(None), u'')
 
 	
 	def test_unicode(self):
-		self.failUnlessEqual(nl(simple_markup('äöü')), u'<p>äöü</p>')
-		self.failUnlessEqual(nl(simple_markup(u'äöü')), u'<p>äöü</p>')
+		self.failUnlessEqual(nl(resimar('äöü')), u'<p>äöü</p>')
+		self.failUnlessEqual(nl(resimar(u'äöü')), u'<p>äöü</p>')
 
 	
 	def test_blocksplitting(self):
 			(u'nulla  \n\n  blublu \n\n blarg', 6)
 		)
 		for s, c in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			self.failUnlessEqual(ct(output, 'p'), c, output)
-			output = simple_markup(s)
+			output = resimar(s)
 			self.failUnlessEqual(ct(output, 'p'), c)
 
 
 			""", 2)
 		)
 		for s, c in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			self.failUnlessEqual(ct(output, 'b'), c)
-			output = simple_markup(s)
+			output = resimar(s)
 			self.failUnlessEqual(ct(output, 'b'), c)
 
 
 			""", 2)
 		)
 		for s, c in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			self.failUnlessEqual(ct(output, 'del'), c)
-			output = simple_markup(s)
+			output = resimar(s)
 			self.failUnlessEqual(ct(output, 'del'), c)
 
 
 			""", 2)
 		)
 		for s, c in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			self.failUnlessEqual(ct(output, 'i'), c)
-			output = simple_markup(s)
+			output = resimar(s)
 			self.failUnlessEqual(ct(output, 'i'), c)
 
 
 			""", 2)
 		)
 		for s, c in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			self.failUnlessEqual(ct(output, 'u'), c)
-			output = simple_markup(s)
+			output = resimar(s)
 			self.failUnlessEqual(ct(output, 'u'), c)
 
 
 			),
 		)
 		for s, o in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			self.failUnlessEqual(nl(output), o)
-			output = simple_markup(s)
+			output = resimar(s)
 			self.failUnlessEqual(nl(output), o)
 
 
 			(u'(mbierhahn@fh-hof.de)', u'<p>(<a href="mailto:mbierhahn@fh-hof.de">mbierhahn@fh-hof.de</a>)</p>')
 		)
 		for s, o in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			self.failUnlessEqual(nl(output), o)
-			output = simple_markup(s)
+			output = resimar(s)
 			self.failUnlessEqual(nl(output), o)
 		
 
 			(u"""#  sdfol\n  # sdfol\n  #   sdfol\n""", 2, 6),
 		)
 		for s, c, cc in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			self.failUnlessEqual(ct(output, 'ol'), c)
 			self.failUnlessEqual(ct(output, 'li'), cc)
 			# print output
-			output = simple_markup(s)
+			output = resimar(s)
 			self.failUnlessEqual(ct(output, 'ol'), c)
 			self.failUnlessEqual(ct(output, 'li'), cc, output)
 
     * Intuos3 Grip Pen''', 2, 4),
 		)
 		for s, c, cc in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			# print output
 			self.failUnlessEqual(ct(output, 'ul'), c)
 			self.failUnlessEqual(ct(output, 'li'), cc)
-			output = simple_markup(s)
+			output = resimar(s)
 			# print output
 			self.failUnlessEqual(ct(output, 'ul'), c)
 			self.failUnlessEqual(ct(output, 'li'), cc)
 
 	
 	def test_list_markup(self):
-		output = simple_markup(u'- **bold**', tidy=False)
+		output = resimar(u'- **bold**', tidy=False)
 		self.failUnlessEqual(ct(output, 'ul'), 2)
 		self.failUnlessEqual(ct(output, 'li'), 2)
 		self.failUnlessEqual(ct(output, 'b'), 2)
-		output = simple_markup(u'- **bold**')
+		output = resimar(u'- **bold**')
 		self.failUnlessEqual(ct(output, 'ul'), 2)
 		self.failUnlessEqual(ct(output, 'li'), 2)
 		self.failUnlessEqual(ct(output, 'b'), 2)
 
-		output = simple_markup(u'- //emphasized//', tidy=False)
+		output = resimar(u'- //emphasized//', tidy=False)
 		self.failUnlessEqual(ct(output, 'ul'), 2)
 		self.failUnlessEqual(ct(output, 'li'), 2)
 		self.failUnlessEqual(ct(output, 'i'), 2)
-		output = simple_markup(u'- //emphasized//')
+		output = resimar(u'- //emphasized//')
 		self.failUnlessEqual(ct(output, 'ul'), 2)
 		self.failUnlessEqual(ct(output, 'li'), 2)
 		self.failUnlessEqual(ct(output, 'i'), 2)
 
-		output = simple_markup(u'- --strikethrough--', tidy=False)
+		output = resimar(u'- --strikethrough--', tidy=False)
 		self.failUnlessEqual(ct(output, 'ul'), 2)
 		self.failUnlessEqual(ct(output, 'li'), 2)
 		self.failUnlessEqual(ct(output, 'del'), 2)
-		output = simple_markup(u'- --strikethrough--')
+		output = resimar(u'- --strikethrough--')
 		self.failUnlessEqual(ct(output, 'ul'), 2)
 		self.failUnlessEqual(ct(output, 'li'), 2)
 		self.failUnlessEqual(ct(output, 'del'), 2)
 
-		output = simple_markup(u'- __underline__', tidy=False)
+		output = resimar(u'- __underline__', tidy=False)
 		self.failUnlessEqual(ct(output, 'ul'), 2)
 		self.failUnlessEqual(ct(output, 'li'), 2)
 		self.failUnlessEqual(ct(output, 'u'), 2)
-		output = simple_markup(u'- __underline__')
+		output = resimar(u'- __underline__')
 		self.failUnlessEqual(ct(output, 'ul'), 2)
 		self.failUnlessEqual(ct(output, 'li'), 2)
 		self.failUnlessEqual(ct(output, 'u'), 2)
 					(bs_s3, bs_r3),
 				)
 				for s, r in pairs:
-					self.failUnlessEqual(simple_markup(s), r)
+					self.failUnlessEqual(resimar(s), r)
 			:code
 			""", 1, 2),
 			(u'code:\ndef blockstring(self):\n:code', 0, 2),
 		)
 		for s, c, cc in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			print output
 			self.failUnlessEqual(ct(output, u'div'), c)
 			self.failUnlessEqual(ct(output, u'code'), cc)
 			# print 'dbg', output.split('\n')[-3:-2], u':code'
 			self.failIf(output.count(u':code') > 0)
-			# output = simple_markup(s)
+			# output = resimar(s)
 			self.failUnlessEqual(ct(output, u'div'), c)
 			self.failUnlessEqual(ct(output, u'code'), cc)
 			self.failIf(output.count(u':code') > 0)
 			(u'code:c\nline1\nline2\n\nline3\nline4\n:code', 1, 2),
 		)
 		for s, c, cc in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			print output
 			self.failUnlessEqual(ct(output, u'div'), c)
 			self.failUnlessEqual(ct(output, u'code'), cc)
 			print 'dbg', output.split('\n')[-3:-2]
 			self.failIf(output.count(u':code') > 0)
-			# output = simple_markup(s)
+			# output = resimar(s)
 			self.failUnlessEqual(ct(output, u'div'), c)
 			self.failUnlessEqual(ct(output, u'code'), cc)
 			self.failIf(output.count(u':code') > 0)
 			(u'quote: blubber\nbleargh\n', 1, 2, 2),
 		)
 		for s, c, cc, ccc in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			print output
 			self.failUnlessEqual(ct(output, u'div'), c)
 			self.failUnlessEqual(ct(output, u'blockquote'), cc)
 			self.failUnlessEqual(ct(output, u'p'), ccc)
 			self.failIf(output.count(u':quote') > 0)
-			output = simple_markup(s)
+			output = resimar(s)
 			print output
 			self.failUnlessEqual(ct(output, u'div'), c)
 			self.failUnlessEqual(ct(output, u'blockquote'), cc)
 			(u'code:c\nline1\nline2\n\nline3\nline4\n:code', 1, 2),
 		)
 		for s, c, cc in pairs:
-			output = simple_markup(s, tidy=False)
+			output = resimar(s, tidy=False)
 			print output
 			self.failUnlessEqual(ct(output, u'div'), c)
 			self.failUnlessEqual(ct(output, u'code'), cc)
 			print 'dbg', output.split('\n')[-3:-2]
 			self.failIf(output.count(u':quote') > 0)
-			# output = simple_markup(s)
+			# output = resimar(s)
 			self.failUnlessEqual(ct(output, u'div'), c)
 			self.failUnlessEqual(ct(output, u'code'), cc)
 			self.failIf(output.count(u':quote') > 0)
 - Im About-Bereich und wahrscheinlich in jedem Bereich der viel Inhalt enthält, fehlt unten der grüne footer-Bereich.
 """
 
-print simple_markup(text11)
+print resimar(text11)
 
 if __name__ == '__main__': unittest.main()