for i, token in enumerate(self._tokens[:-1]):
- # if we have a paragraph break, insert that and go on to next token
+ # if we have a paragraph break, insert that and go on to the next token
self.edited_text += u'\n\n'
self.edited_text += token.str
- # now figure out if a space should follow it
+ # now figure out how many spaces should follow it
next_token = self._tokens[i + 1]
if quote_stack and quote_stack[-1] == token.str:
# space after close quote
# no space after open quote
- self.edited_text += u' '
+ self.edited_text += append_space
def _process_tokens(self, infile):
all_rules = rules.get_rules()