Commits

Simon Sapin committed 85d114f

Remove no-op generators.

Generators are already iterators, and the iter() function returns an iterator
(with internal state and a next() method) for any iterable.
Iterators are iterable too, so iter() is idempotent.

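A standalone illustration of the point (not part of the patch; the token tuples are made up):

    # iter() returns an iterator for any iterable, and is a no-op on anything
    # that is already an iterator (including generators).
    tokens = [('S', ' '), ('IDENT', 'a')]   # any iterable

    it = iter(tokens)            # list iterator with internal state and next()
    assert iter(it) is it        # idempotent: the same object comes back

    gen = (t for t in tokens)    # a generator expression is itself an iterator
    assert iter(gen) is gen      # so wrapping it again, e.g. (t for t in gen), adds nothing
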
  • Parent commits ad30e9a


Files changed (8)

File src/cssutils/css/cssmediarule.py

                 
                 self.cssRules = cssutils.css.CSSRuleList()
                 seq = [] # not used really
-                
-                tokenizer = (t for t in cssrulestokens) # TODO: not elegant!
-                wellformed, expected = self._parse(braceOrEOF, 
-                                                   seq, 
+
+                tokenizer = iter(cssrulestokens)
+                wellformed, expected = self._parse(braceOrEOF,
+                                                   seq,
                                                    tokenizer, {
                                                      'COMMENT': COMMENT,
                                                      'CHARSET_SYM': atrule,

File src/cssutils/css/csspagerule.py

 
     def __parseMarginAndStyle(self, tokens):
         "tokens is a list, no generator (yet)"        
-        g = (t for t in tokens)
+        g = iter(tokens)
         styletokens = []
         
         # new rules until parse done

File src/cssutils/css/selector.py

                 else:
                     tokens.append(t)
 
-            # TODO: back to generator but not elegant at all!
-            tokenizer = (t for t in tokens) 
+            tokenizer = iter(tokens)
 
             # for closures: must be a mutable
             new = {'context': [''], # stack of: 'attrib', 'negation', 'pseudo'

File src/cssutils/prodparser.py

             # OLD: (token, tokens) or a single token
             if len(text) == 2:
                 # (token, tokens)
-                def gen(token, tokens):
-                    "new generator appending token and tokens"
-                    yield token
-                    for t in tokens:
-                        yield t
-
-                return (t for t in gen(*text))
-
+                token, tokens = text
+                return chain([token], tokens)
             else:
                 # single token
-                return  (t for t in [text])
+                return iter([text])
             
         elif isinstance(text, list):
             # OLD: generator from list
-            return (t for t in text)
+            return iter(text)
         
         else:
             # DEFAULT, already tokenized, assume generator
     def _SorTokens(self, tokens, until=',/'):
         """New tokens generator which has S tokens removed,
         if followed by anything in ``until``, normally a ``,``."""
-        def removedS(tokens):
-            for token in tokens:
-                if token[0] == self.types.S:
-                    try:
-                        next_ = tokens.next()
-                    except StopIteration:
-                        yield token
-                    else:
-                        if next_[1] in until:
-                            # omit S as e.g. ``,`` has been found
-                            yield next_
-                        elif next_[0] == self.types.COMMENT:
-                            # pass COMMENT
-                            yield next_
-                        else:
-                            yield token
-                            yield next_
-
-                elif token[0] == self.types.COMMENT:
-                    # pass COMMENT
+        for token in tokens:
+            if token[0] == self.types.S:
+                try:
+                    next_ = tokens.next()
+                except StopIteration:
                     yield token
                 else:
-                    yield token
-                    break
-            # normal mode again
-            for token in tokens:
+                    if next_[1] in until:
+                        # omit S as e.g. ``,`` has been found
+                        yield next_
+                    elif next_[0] == self.types.COMMENT:
+                        # pass COMMENT
+                        yield next_
+                    else:
+                        yield token
+                        yield next_
+
+            elif token[0] == self.types.COMMENT:
+                # pass COMMENT
                 yield token
+            else:
+                yield token
+                break
+        # normal mode again
+        for token in tokens:
+            yield token
 
-        return (token for token in removedS(tokens))
 
     def parse(self, text, name, productions, keepS=False, store=None):
         """

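As a side note on the prodparser.py hunk above: the hand-written helper that yielded one token and then everything from a following iterator is exactly what itertools.chain provides, and a function whose body contains yield already returns a generator when called, so wrapping its result in yet another generator expression was a no-op. A small standalone sketch (assumed names, not the cssutils API):

    from itertools import chain

    def prepend_old(token, tokens):
        "Hand-written version: yield token, then pass the rest through."
        yield token
        for t in tokens:
            yield t

    def prepend_new(token, tokens):
        "Equivalent version using itertools.chain."
        return chain([token], tokens)

    assert list(prepend_old('first', iter(['a', 'b']))) == ['first', 'a', 'b']
    assert list(prepend_new('first', iter(['a', 'b']))) == ['first', 'a', 'b']
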
File src/cssutils/util.py

 __version__ = '$Id$'
 
 from helper import normalize
-from itertools import ifilter
+from itertools import ifilter, chain
 import cssutils
 import codec
 import codecs
             # needs to be tokenized
             return self.__tokenizer2.tokenize(
                  textortokens)
-        elif isinstance(textortokens, types.GeneratorType):
-            # already tokenized
-            return textortokens
         elif isinstance(textortokens, tuple):
             # a single token (like a comment)
             return [textortokens]
         else:
-            # already tokenized but return generator
-            return (x for x in textortokens)
+            # already tokenized but return an iterator
+            return iter(textortokens)
 
     def _nexttoken(self, tokenizer, default=None):
         "returns next token in generator tokenizer or the default value"
                 yield initialtoken
                 for item in tokenizer:
                     yield item
-            fulltokenizer = (t for t in tokens())
+            fulltokenizer = chain([initialtoken], tokenizer)
         else:
             fulltokenizer = tokenizer
 
         return encoding, enctype, decodedCssText
     else:
         return None, None, None
-

File src/tests/test_cssstylesheet.py

         self.assertEquals('example', s.namespaces['ex2'])
         self.assertRaises(xml.dom.NamespaceErr, s.namespaces.__getitem__, 'UNSET')
         # __iter__
-        self.assertEquals(['', 'ex2'], [n for n in s.namespaces])
+        self.assertEquals(['', 'ex2'], list(s.namespaces))
         # __len__
         self.assertEqual(2, len(s.namespaces))
         # __setitem__

File src/tests/test_cssvariablesdeclaration.py

         
         items = []
         # unsorted!
-        self.assertEquals(sorted([k for k in v]), ['x', 'z'])
+        self.assertEquals(sorted(v), ['x', 'z'])
         
         del v['z']        
         self.assertEqual(1, v.length)

File src/tests/test_tokenize2.py

                 self.assertEqual(expected, actual)
 
             # check if all same number of tokens
-            tokens = [t for t in tokenizer.tokenize(css)]
+            tokens = list(tokenizer.tokenize(css))
             self.assertEqual(len(tokens), len(tests[css]))
 
     def test_tokenizefullsheet(self):
                     self.assertEqual(expected, actual)
 
             # check if all same number of tokens
-            tokens = [t for t in tokenizer.tokenize(css, fullsheet=True)]
+            tokens = list(tokenizer.tokenize(css, fullsheet=True))
             # EOF is added so -1
             self.assertEqual(len(tokens) - 1, len(tests[css]))