Commits

Georg Brandl committed ea295cf

Fixes #748: clean up deprecation and resource warnings in the test suite when run with python3 -Wd.

  • Parent commit: de45f95

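The cleanup follows two recurring patterns, visible throughout the hunks below: deprecated unittest aliases (assertEquals, assert_, failIf) are replaced by their current names (assertEqual, assertTrue, assertFalse), and bare open(...).read() expressions are rewritten so the file object is closed explicitly, which silences the ResourceWarning that python3 -Wd reports for files left to the garbage collector. A minimal, self-contained sketch of the pattern (the test class and the file it reads are illustrative, not part of the commit):

    import unittest

    class ResourceWarningFreeTest(unittest.TestCase):

        def test_read_file_explicitly_closed(self):
            # Open and close in a try/finally block instead of writing
            # open(...).read(), so the handle is released deterministically
            # and python3 -Wd stays quiet.
            fp = open(__file__, 'rb')
            try:
                text = fp.read().decode('utf-8')
            finally:
                fp.close()
            # assertTrue/assertEqual replace the deprecated assert_/assertEquals.
            self.assertTrue(len(text) > 0)

    if __name__ == '__main__':
        unittest.main()

try/finally is used here rather than a with statement to match the commit, presumably because the code base still supported older Python 2 releases at the time. Under Python 3, an invocation such as python3 -Wd -m unittest discover tests (one possible command, not taken from the commit) re-enables DeprecationWarning and ResourceWarning so regressions of this kind become visible again.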

Files changed (9)

tests/test_basic_api.py

         for x in filters.FILTERS.keys():
             lx = lexers.PythonLexer()
             lx.add_filter(x, **filter_args.get(x, {}))
-            text = open(TESTFILE, 'rb').read().decode('utf-8')
+            fp = open(TESTFILE, 'rb')
+            try:
+                text = fp.read().decode('utf-8')
+            finally:
+                fp.close()
             tokens = list(lx.get_tokens(text))
             roundtext = ''.join([t[1] for t in tokens])
             if x not in ('whitespace', 'keywordcase'):
                 # these filters change the text
-                self.assertEquals(roundtext, text,
-                                  "lexer roundtrip with %s filter failed" % x)
+                self.assertEqual(roundtext, text,
+                                 "lexer roundtrip with %s filter failed" % x)
 
     def test_raiseonerror(self):
         lx = lexers.PythonLexer()
     def test_whitespace(self):
         lx = lexers.PythonLexer()
         lx.add_filter('whitespace', spaces='%')
-        text = open(TESTFILE, 'rb').read().decode('utf-8')
+        fp = open(TESTFILE, 'rb')
+        try:
+            text = fp.read().decode('utf-8')
+        finally:
+            fp.close()
         lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
-        self.failIf(' ' in lxtext)
+        self.assertFalse(' ' in lxtext)
 
     def test_keywordcase(self):
         lx = lexers.PythonLexer()
         lx.add_filter('keywordcase', case='capitalize')
-        text = open(TESTFILE, 'rb').read().decode('utf-8')
+        fp = open(TESTFILE, 'rb')
+        try:
+            text = fp.read().decode('utf-8')
+        finally:
+            fp.close()
         lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
-        self.assert_('Def' in lxtext and 'Class' in lxtext)
+        self.assertTrue('Def' in lxtext and 'Class' in lxtext)
 
     def test_codetag(self):
         lx = lexers.PythonLexer()
         lx.add_filter('codetagify')
         text = u'# BUG: text'
         tokens = list(lx.get_tokens(text))
-        self.assertEquals('# ', tokens[0][1])
-        self.assertEquals('BUG', tokens[1][1])
+        self.assertEqual('# ', tokens[0][1])
+        self.assertEqual('BUG', tokens[1][1])
 
     def test_codetag_boundary(self):
         # ticket #368
         lx.add_filter('codetagify')
         text = u'# DEBUG: text'
         tokens = list(lx.get_tokens(text))
-        self.assertEquals('# DEBUG: text', tokens[0][1])
+        self.assertEqual('# DEBUG: text', tokens[0][1])

tests/test_cmdline.py

 
     def test_L_opt(self):
         c, o, e = run_cmdline("-L")
-        self.assertEquals(c, 0)
-        self.assert_("Lexers" in o and "Formatters" in o and
-                     "Filters" in o and "Styles" in o)
+        self.assertEqual(c, 0)
+        self.assertTrue("Lexers" in o and "Formatters" in o and
+                        "Filters" in o and "Styles" in o)
         c, o, e = run_cmdline("-L", "lexer")
-        self.assertEquals(c, 0)
-        self.assert_("Lexers" in o and "Formatters" not in o)
+        self.assertEqual(c, 0)
+        self.assertTrue("Lexers" in o and "Formatters" not in o)
         c, o, e = run_cmdline("-L", "lexers")
-        self.assertEquals(c, 0)
+        self.assertEqual(c, 0)
 
     def test_O_opt(self):
         filename = TESTFILE
         c, o, e = run_cmdline("-Ofull=1,linenos=true,foo=bar",
                               "-fhtml", filename)
-        self.assertEquals(c, 0)
-        self.assert_("<html" in o)
-        self.assert_('class="linenos"' in o)
+        self.assertEqual(c, 0)
+        self.assertTrue("<html" in o)
+        self.assertTrue('class="linenos"' in o)
 
     def test_P_opt(self):
         filename = TESTFILE
         c, o, e = run_cmdline("-Pfull", "-Ptitle=foo, bar=baz=,",
                               "-fhtml", filename)
-        self.assertEquals(c, 0)
-        self.assert_("<title>foo, bar=baz=,</title>" in o)
+        self.assertEqual(c, 0)
+        self.assertTrue("<title>foo, bar=baz=,</title>" in o)
 
     def test_F_opt(self):
         filename = TESTFILE
         c, o, e = run_cmdline("-Fhighlight:tokentype=Name.Blubb,"
                               "names=TESTFILE filename",
                               "-fhtml", filename)
-        self.assertEquals(c, 0)
-        self.assert_('<span class="n-Blubb' in o)
+        self.assertEqual(c, 0)
+        self.assertTrue('<span class="n-Blubb' in o)
 
     def test_H_opt(self):
         c, o, e = run_cmdline("-H", "formatter", "html")
-        self.assertEquals(c, 0)
-        self.assert_('HTML' in o)
+        self.assertEqual(c, 0)
+        self.assertTrue('HTML' in o)
 
     def test_S_opt(self):
         c, o, e = run_cmdline("-S", "default", "-f", "html", "-O", "linenos=1")
-        self.assertEquals(c, 0)
+        self.assertEqual(c, 0)
 
     def test_invalid_opts(self):
         for opts in [("-L", "-lpy"), ("-L", "-fhtml"), ("-L", "-Ox"),
                      ("-a",), ("-Sst", "-lpy"), ("-H",),
                      ("-H", "formatter"),]:
-            self.assert_(run_cmdline(*opts)[0] == 2)
+            self.assertTrue(run_cmdline(*opts)[0] == 2)
 
     def test_normal(self):
         # test that cmdline gives the same output as library api
         from pygments.lexers import PythonLexer
         from pygments.formatters import HtmlFormatter
         filename = TESTFILE
-        code = open(filename, 'rb').read()
+        fp = open(filename, 'rb')
+        try:
+            code = fp.read()
+        finally:
+            fp.close()
 
         output = highlight(code, PythonLexer(), HtmlFormatter())
 
         c, o, e = run_cmdline("-lpython", "-fhtml", filename)
 
-        self.assertEquals(o, output)
-        self.assertEquals(e, "")
-        self.assertEquals(c, 0)
+        self.assertEqual(o, output)
+        self.assertEqual(e, "")
+        self.assertEqual(c, 0)

tests/test_examplefiles.py

         yield check_lexer, lx, absfn, outfn
 
 def check_lexer(lx, absfn, outfn):
-    text = open(absfn, 'rb').read()
+    fp = open(absfn, 'rb')
+    try:
+        text = fp.read()
+    finally:
+        fp.close()
     text = text.replace(b('\r\n'), b('\n'))
     text = text.strip(b('\n')) + b('\n')
     try:

tests/test_html_formatter.py

 
 TESTFILE, TESTDIR = support.location(__file__)
 
-tokensource = list(PythonLexer().get_tokens(
-    uni_open(TESTFILE, encoding='utf-8').read()))
+fp = uni_open(TESTFILE, encoding='utf-8')
+try:
+    tokensource = list(PythonLexer().get_tokens(fp.read()))
+finally:
+    fp.close()
 
 
 class HtmlFormatterTest(unittest.TestCase):
 
         stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
         escaped_text = escape_html(noutfile.getvalue())
-        self.assertEquals(stripped_html, escaped_text)
+        self.assertEqual(stripped_html, escaped_text)
 
     def test_external_css(self):
         # test correct behavior
         fmt1.format(tokensource, tfile)
         try:
             fmt2.format(tokensource, tfile)
-            self.assert_(isfile(join(TESTDIR, 'fmt2.css')))
+            self.assertTrue(isfile(join(TESTDIR, 'fmt2.css')))
         except IOError:
             # test directory not writable
             pass
         tfile.close()
 
-        self.assert_(isfile(join(dirname(tfile.name), 'fmt1.css')))
+        self.assertTrue(isfile(join(dirname(tfile.name), 'fmt1.css')))
         os.unlink(join(dirname(tfile.name), 'fmt1.css'))
         try:
             os.unlink(join(TESTDIR, 'fmt2.css'))
         fmt = HtmlFormatter(**optdict)
         fmt.format(tokensource, outfile)
         html = outfile.getvalue()
-        self.assert_(re.search("<pre>\s+1\s+2\s+3", html))
+        self.assertTrue(re.search("<pre>\s+1\s+2\s+3", html))
 
     def test_linenos_with_startnum(self):
         optdict = dict(linenos=True, linenostart=5)
         fmt = HtmlFormatter(**optdict)
         fmt.format(tokensource, outfile)
         html = outfile.getvalue()
-        self.assert_(re.search("<pre>\s+5\s+6\s+7", html))
+        self.assertTrue(re.search("<pre>\s+5\s+6\s+7", html))
 
     def test_lineanchors(self):
         optdict = dict(lineanchors="foo")
         fmt = HtmlFormatter(**optdict)
         fmt.format(tokensource, outfile)
         html = outfile.getvalue()
-        self.assert_(re.search("<pre><a name=\"foo-1\">", html))
+        self.assertTrue(re.search("<pre><a name=\"foo-1\">", html))
 
     def test_lineanchors_with_startnum(self):
         optdict = dict(lineanchors="foo", linenostart=5)
         fmt = HtmlFormatter(**optdict)
         fmt.format(tokensource, outfile)
         html = outfile.getvalue()
-        self.assert_(re.search("<pre><a name=\"foo-5\">", html))
+        self.assertTrue(re.search("<pre><a name=\"foo-5\">", html))
 
     def test_valid_output(self):
         # test all available wrappers
         catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
         try:
             import subprocess
-            ret = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
-                                   stdout=subprocess.PIPE).wait()
+            po = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
+                                  stdout=subprocess.PIPE)
+            ret = po.wait()
+            po.stdout.close()
         except OSError:
             # nsgmls not available
             pass
         else:
-            self.failIf(ret, 'nsgmls run reported errors')
+            self.assertFalse(ret, 'nsgmls run reported errors')
 
         os.unlink(pathname)
 
     def test_get_style_defs(self):
         fmt = HtmlFormatter()
         sd = fmt.get_style_defs()
-        self.assert_(sd.startswith('.'))
+        self.assertTrue(sd.startswith('.'))
 
         fmt = HtmlFormatter(cssclass='foo')
         sd = fmt.get_style_defs()
-        self.assert_(sd.startswith('.foo'))
+        self.assertTrue(sd.startswith('.foo'))
         sd = fmt.get_style_defs('.bar')
-        self.assert_(sd.startswith('.bar'))
+        self.assertTrue(sd.startswith('.bar'))
         sd = fmt.get_style_defs(['.bar', '.baz'])
         fl = sd.splitlines()[0]
-        self.assert_('.bar' in fl and '.baz' in fl)
+        self.assertTrue('.bar' in fl and '.baz' in fl)
 
     def test_unicode_options(self):
         fmt = HtmlFormatter(title=u'Föö',

tests/test_latex_formatter.py

 class LatexFormatterTest(unittest.TestCase):
 
     def test_valid_output(self):
-        tokensource = list(PythonLexer().get_tokens(open(TESTFILE).read()))
+        fp = open(TESTFILE)
+        try:
+            tokensource = list(PythonLexer().get_tokens(fp.read()))
+        finally:
+            fp.close()
         fmt = LatexFormatter(full=True, encoding='latin1')
 
         handle, pathname = tempfile.mkstemp('.tex')
         tfile.close()
         try:
             import subprocess
-            ret = subprocess.Popen(['latex', '-interaction=nonstopmode',
-                                    pathname],
-                                   stdout=subprocess.PIPE).wait()
+            po = subprocess.Popen(['latex', '-interaction=nonstopmode',
+                                   pathname], stdout=subprocess.PIPE)
+            ret = po.wait()
+            po.stdout.close()
         except OSError:
             # latex not available
             pass
         else:
-            self.failIf(ret, 'latex run reported errors')
+            self.assertFalse(ret, 'latex run reported errors')
 
         os.unlink(pathname)
         os.chdir(old_wd)
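
The two subprocess hunks above (in test_html_formatter.py and test_latex_formatter.py) apply the same idea to pipes: keeping a reference to the Popen object makes it possible to close its stdout pipe explicitly after wait(), which python3 -Wd would otherwise flag as an unclosed file. A minimal illustration, using an arbitrary child command rather than the nsgmls/latex calls from the tests:

    import subprocess
    import sys

    # Keep a reference to the Popen object so the PIPE handle can be
    # closed explicitly; subprocess.Popen(...).wait() alone leaves the
    # pipe's file object for the garbage collector to clean up.
    po = subprocess.Popen([sys.executable, '-c', 'print("ok")'],
                          stdout=subprocess.PIPE)
    ret = po.wait()
    po.stdout.close()
    assert ret == 0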

tests/test_regexlexer.py

     def test(self):
         lx = TestLexer()
         toks = list(lx.get_tokens_unprocessed('abcde'))
-        self.assertEquals(toks,
+        self.assertEqual(toks,
            [(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
             (3, Text.Beer, 'd'), (4, Text.Root, 'e')])

tests/test_token.py

 """
 
 import unittest
-import StringIO
-import sys
 
 from pygments import token
 
 class TokenTest(unittest.TestCase):
 
     def test_tokentype(self):
-        e = self.assertEquals
-        r = self.assertRaises
+        e = self.assertEqual
 
         t = token.String
 
         e(t.__class__, token._TokenType)
 
     def test_functions(self):
-        self.assert_(token.is_token_subtype(token.String, token.String))
-        self.assert_(token.is_token_subtype(token.String, token.Literal))
-        self.failIf(token.is_token_subtype(token.Literal, token.String))
+        self.assertTrue(token.is_token_subtype(token.String, token.String))
+        self.assertTrue(token.is_token_subtype(token.String, token.Literal))
+        self.assertFalse(token.is_token_subtype(token.Literal, token.String))
 
-        self.assert_(token.string_to_tokentype(token.String) is token.String)
-        self.assert_(token.string_to_tokentype('') is token.Token)
-        self.assert_(token.string_to_tokentype('String') is token.String)
+        self.assertTrue(token.string_to_tokentype(token.String) is token.String)
+        self.assertTrue(token.string_to_tokentype('') is token.Token)
+        self.assertTrue(token.string_to_tokentype('String') is token.String)
 
     def test_sanity_check(self):
         stp = token.STANDARD_TYPES.copy()

tests/test_using_api.py

         expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
                     (String, '"'), (Text, 'e\n')]
         t = list(TestLexer().get_tokens('a"bcd"e'))
-        self.assertEquals(t, expected)
+        self.assertEqual(t, expected)
 
     def test_error(self):
         def gen():

tests/test_util.py

 """
 
 import unittest
-import os
 
 from pygments import util
 
 
     def test_getoptions(self):
         raises = self.assertRaises
-        equals = self.assertEquals
+        equals = self.assertEqual
 
         equals(util.get_bool_opt({}, 'a', True), True)
         equals(util.get_bool_opt({}, 'a', 1), True)
             other text
             """
 
-        self.assertEquals(util.docstring_headline(f1), "docstring headline")
-        self.assertEquals(util.docstring_headline(f2), "docstring headline")
+        self.assertEqual(util.docstring_headline(f1), "docstring headline")
+        self.assertEqual(util.docstring_headline(f2), "docstring headline")
 
     def test_analysator_returns_float(self):
         # If an analysator wrapped by make_analysator returns a floating point
         # number, then that number will be returned by the wrapper.
-        self.assertEquals(FakeLexer.analyse('0.5'), 0.5)
+        self.assertEqual(FakeLexer.analyse('0.5'), 0.5)
 
     def test_analysator_returns_boolean(self):
         # If an analysator wrapped by make_analysator returns a boolean value,
         # then the wrapper will return 1.0 if the boolean was True or 0.0 if
         # it was False.
-        self.assertEquals(FakeLexer.analyse(True), 1.0)
-        self.assertEquals(FakeLexer.analyse(False), 0.0)
+        self.assertEqual(FakeLexer.analyse(True), 1.0)
+        self.assertEqual(FakeLexer.analyse(False), 0.0)
 
     def test_analysator_raises_exception(self):
         # If an analysator wrapped by make_analysator raises an exception,
             def analyse(text):
                 raise RuntimeError('something bad happened')
             analyse = util.make_analysator(analyse)
-        self.assertEquals(ErrorLexer.analyse(''), 0.0)
+        self.assertEqual(ErrorLexer.analyse(''), 0.0)
 
     def test_analysator_value_error(self):
         # When converting the analysator's return value to a float a
         # ValueError may occur.  If that happens 0.0 is returned instead.
-        self.assertEquals(FakeLexer.analyse('bad input'), 0.0)
+        self.assertEqual(FakeLexer.analyse('bad input'), 0.0)
 
     def test_analysator_type_error(self):
         # When converting the analysator's return value to a float a
         # TypeError may occur.  If that happens 0.0 is returned instead.
-        self.assertEquals(FakeLexer.analyse(None), 0.0)
+        self.assertEqual(FakeLexer.analyse(None), 0.0)
 
     def test_shebang_matches(self):
-        self.assert_(util.shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?'))
-        self.assert_(util.shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?'))
-        self.assert_(util.shebang_matches('#!/usr/bin/startsomethingwith python',
-                                          r'python(2\.\d)?'))
-        self.assert_(util.shebang_matches('#!C:\\Python2.4\\Python.exe',
-                                          r'python(2\.\d)?'))
+        self.assertTrue(util.shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?'))
+        self.assertTrue(util.shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?'))
+        self.assertTrue(util.shebang_matches('#!/usr/bin/startsomethingwith python',
+                                             r'python(2\.\d)?'))
+        self.assertTrue(util.shebang_matches('#!C:\\Python2.4\\Python.exe',
+                                             r'python(2\.\d)?'))
 
-        self.failIf(util.shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?'))
-        self.failIf(util.shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?'))
-        self.failIf(util.shebang_matches('#!', r'python'))
+        self.assertFalse(util.shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?'))
+        self.assertFalse(util.shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?'))
+        self.assertFalse(util.shebang_matches('#!', r'python'))
 
     def test_doctype_matches(self):
-        self.assert_(util.doctype_matches('<!DOCTYPE html PUBLIC "a"> <html>',
-                                          'html.*'))
-        self.failIf(util.doctype_matches('<?xml ?> <DOCTYPE html PUBLIC "a"> <html>',
-                                         'html.*'))
-        self.assert_(util.html_doctype_matches(
+        self.assertTrue(util.doctype_matches(
+            '<!DOCTYPE html PUBLIC "a"> <html>', 'html.*'))
+        self.assertFalse(util.doctype_matches(
+            '<?xml ?> <DOCTYPE html PUBLIC "a"> <html>', 'html.*'))
+        self.assertTrue(util.html_doctype_matches(
             '<?xml ?><!DOCTYPE html PUBLIC  "-//W3C//DTD XHTML 1.0 Strict//EN">'))
 
     def test_xml(self):
-        self.assert_(util.looks_like_xml(
+        self.assertTrue(util.looks_like_xml(
             '<?xml ?><!DOCTYPE html PUBLIC  "-//W3C//DTD XHTML 1.0 Strict//EN">'))
-        self.assert_(util.looks_like_xml('<html xmlns>abc</html>'))
-        self.failIf(util.looks_like_xml('<html>'))
+        self.assertTrue(util.looks_like_xml('<html xmlns>abc</html>'))
+        self.assertFalse(util.looks_like_xml('<html>'))