
Amaury Forgeot d'Arc committed e771175 Merge

hg merge default


Files changed (71)

lib-python/3.2/test/test_codeccallbacks.py

                 raise TypeError("don't know how to handle %r" % exc)
         codecs.register_error("test.replacing", replacing)
         for (encoding, data) in baddata:
+            print((encoding, data))
             self.assertRaises(TypeError, data.decode, encoding, "test.replacing")
 
         def mutating(exc):

pypy/doc/project-ideas.rst

 PyPy's bytearray type is very inefficient. It would be an interesting
 task to look into possible optimizations on this.
 
+Implement copy-on-write list slicing
+------------------------------------
+
+The idea is to have a special implementation of list objects which is used
+when doing ``myslice = mylist[a:b]``: the new list is not constructed
+immediately, but only when (and if) ``myslice`` or ``mylist`` are mutated.
+
+
 Numpy improvements
 ------------------
 
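
The copy-on-write slicing idea proposed above can be pictured with a small pure-Python model. This is only a hypothetical sketch to illustrate the semantics; the real optimization would live inside PyPy's list implementation (and would also have to force the copy when the parent list itself is mutated, which this toy version does not handle)::

    class LazySlice(object):
        """Toy model: behaves like mylist[a:b], but copies lazily."""

        def __init__(self, parent, start, stop):
            self._parent = parent        # original list, shared for now
            self._start = start
            self._stop = stop
            self._copy = None            # real list, created on first write

        def _force(self):
            # Materialize an independent copy the first time it is needed.
            if self._copy is None:
                self._copy = self._parent[self._start:self._stop]
            return self._copy

        def __len__(self):
            if self._copy is not None:
                return len(self._copy)
            return self._stop - self._start

        def __getitem__(self, i):
            if self._copy is not None:
                return self._copy[i]
            assert 0 <= i < len(self)    # toy version: no negative indices
            return self._parent[self._start + i]

        def __setitem__(self, i, value):
            # Mutating the slice forces the copy; the parent is untouched.
            self._force()[i] = value

    mylist = list(range(10))
    myslice = LazySlice(mylist, 2, 5)    # no copying happens here
    assert myslice[0] == 2
    myslice[0] = 42                      # the copy is made only now
    assert mylist[2] == 2                # the original list is unchanged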

pypy/doc/translation.rst

     .. image:: image/translation-greyscale-small.png
 
 
-.. _`PDF color version`: image/translation.pdf
+.. _`PDF color version`: https://bitbucket.org/pypy/pypy/raw/default/pypy/doc/image/translation.pdf
 .. _`bytecode evaluator`: interpreter.html
 .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation
 .. _`Flow Object Space`: objspace.html#the-flow-object-space

pypy/module/cpyext/api.py

     global_code = '\n'.join(global_objects)
 
     prologue = ("#include <Python.h>\n"
-                "#include <src/thread.h>\n")
+                "#include <src/thread.c>\n")
     code = (prologue +
             struct_declaration_code +
             global_code +
                                source_dir / "structseq.c",
                                source_dir / "capsule.c",
                                source_dir / "pysignals.c",
-                               source_dir / "thread.c",
+                               source_dir / "pythread.c",
                                ],
         separate_module_sources=separate_module_sources,
         export_symbols=export_symbols_eci,

pypy/module/cpyext/src/pythread.c

+#include <Python.h>
+#include "pythread.h"
+#include "src/thread.h"
+
+long
+PyThread_get_thread_ident(void)
+{
+    return RPyThreadGetIdent();
+}
+
+PyThread_type_lock
+PyThread_allocate_lock(void)
+{
+    struct RPyOpaque_ThreadLock *lock;
+    lock = malloc(sizeof(struct RPyOpaque_ThreadLock));
+    if (lock == NULL)
+        return NULL;
+
+    if (RPyThreadLockInit(lock) == 0) {
+        free(lock);
+        return NULL;
+    }
+
+    return (PyThread_type_lock)lock;
+}
+
+void
+PyThread_free_lock(PyThread_type_lock lock)
+{
+    struct RPyOpaque_ThreadLock *real_lock = lock;
+    RPyThreadAcquireLock(real_lock, 0);
+    RPyThreadReleaseLock(real_lock);
+    RPyOpaqueDealloc_ThreadLock(real_lock);
+    free(lock);
+}
+
+int
+PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
+{
+    return RPyThreadAcquireLock((struct RPyOpaqueThreadLock*)lock, waitflag);
+}
+
+void
+PyThread_release_lock(PyThread_type_lock lock)
+{
+    RPyThreadReleaseLock((struct RPyOpaqueThreadLock*)lock);
+}
+
+
+/* ------------------------------------------------------------------------
+Per-thread data ("key") support.
+
+Use PyThread_create_key() to create a new key.  This is typically shared
+across threads.
+
+Use PyThread_set_key_value(thekey, value) to associate void* value with
+thekey in the current thread.  Each thread has a distinct mapping of thekey
+to a void* value.  Caution:  if the current thread already has a mapping
+for thekey, value is ignored.
+
+Use PyThread_get_key_value(thekey) to retrieve the void* value associated
+with thekey in the current thread.  This returns NULL if no value is
+associated with thekey in the current thread.
+
+Use PyThread_delete_key_value(thekey) to forget the current thread's associated
+value for thekey.  PyThread_delete_key(thekey) forgets the values associated
+with thekey across *all* threads.
+
+While some of these functions have error-return values, none set any
+Python exception.
+
+None of the functions does memory management on behalf of the void* values.
+You need to allocate and deallocate them yourself.  If the void* values
+happen to be PyObject*, these functions don't do refcount operations on
+them either.
+
+The GIL does not need to be held when calling these functions; they supply
+their own locking.  This isn't true of PyThread_create_key(), though (see
+next paragraph).
+
+There's a hidden assumption that PyThread_create_key() will be called before
+any of the other functions are called.  There's also a hidden assumption
+that calls to PyThread_create_key() are serialized externally.
+------------------------------------------------------------------------ */
+
+#ifdef MS_WINDOWS
+#include <windows.h>
+
+/* use native Windows TLS functions */
+#define Py_HAVE_NATIVE_TLS
+
+int
+PyThread_create_key(void)
+{
+    return (int) TlsAlloc();
+}
+
+void
+PyThread_delete_key(int key)
+{
+    TlsFree(key);
+}
+
+/* We must be careful to emulate the strange semantics implemented in thread.c,
+ * where the value is only set if it hasn't been set before.
+ */
+int
+PyThread_set_key_value(int key, void *value)
+{
+    BOOL ok;
+    void *oldvalue;
+
+    assert(value != NULL);
+    oldvalue = TlsGetValue(key);
+    if (oldvalue != NULL)
+        /* ignore value if already set */
+        return 0;
+    ok = TlsSetValue(key, value);
+    if (!ok)
+        return -1;
+    return 0;
+}
+
+void *
+PyThread_get_key_value(int key)
+{
+    /* because TLS is used in the Py_END_ALLOW_THREAD macro,
+     * it is necessary to preserve the windows error state, because
+     * it is assumed to be preserved across the call to the macro.
+     * Ideally, the macro should be fixed, but it is simpler to
+     * do it here.
+     */
+    DWORD error = GetLastError();
+    void *result = TlsGetValue(key);
+    SetLastError(error);
+    return result;
+}
+
+void
+PyThread_delete_key_value(int key)
+{
+    /* NULL is used as "key missing", and it is also the default
+     * given by TlsGetValue() if nothing has been set yet.
+     */
+    TlsSetValue(key, NULL);
+}
+
+/* reinitialization of TLS is not necessary after fork when using
+ * the native TLS functions.  And forking isn't supported on Windows either.
+ */
+void
+PyThread_ReInitTLS(void)
+{}
+
+#else  /* MS_WINDOWS */
+
+/* A singly-linked list of struct key objects remembers all the key->value
+ * associations.  File static keyhead heads the list.  keymutex is used
+ * to enforce exclusion internally.
+ */
+struct key {
+    /* Next record in the list, or NULL if this is the last record. */
+    struct key *next;
+
+    /* The thread id, according to PyThread_get_thread_ident(). */
+    long id;
+
+    /* The key and its associated value. */
+    int key;
+    void *value;
+};
+
+static struct key *keyhead = NULL;
+static PyThread_type_lock keymutex = NULL;
+static int nkeys = 0;  /* PyThread_create_key() hands out nkeys+1 next */
+
+/* Internal helper.
+ * If the current thread has a mapping for key, the appropriate struct key*
+ * is returned.  NB:  value is ignored in this case!
+ * If there is no mapping for key in the current thread, then:
+ *     If value is NULL, NULL is returned.
+ *     Else a mapping of key to value is created for the current thread,
+ *     and a pointer to a new struct key* is returned; except that if
+ *     malloc() can't find room for a new struct key*, NULL is returned.
+ * So when value==NULL, this acts like a pure lookup routine, and when
+ * value!=NULL, this acts like dict.setdefault(), returning an existing
+ * mapping if one exists, else creating a new mapping.
+ *
+ * Caution:  this used to be too clever, trying to hold keymutex only
+ * around the "p->next = keyhead; keyhead = p" pair.  That allowed
+ * another thread to mutate the list, via key deletion, concurrent with
+ * find_key() crawling over the list.  Hilarity ensued.  For example, when
+ * the for-loop here does "p = p->next", p could end up pointing at a
+ * record that PyThread_delete_key_value() was concurrently free()'ing.
+ * That could lead to anything, from failing to find a key that exists, to
+ * segfaults.  Now we lock the whole routine.
+ */
+static struct key *
+find_key(int key, void *value)
+{
+    struct key *p, *prev_p;
+    long id = PyThread_get_thread_ident();
+
+    if (!keymutex)
+        return NULL;
+    PyThread_acquire_lock(keymutex, 1);
+    prev_p = NULL;
+    for (p = keyhead; p != NULL; p = p->next) {
+        if (p->id == id && p->key == key)
+            goto Done;
+        /* Sanity check.  These states should never happen but if
+         * they do we must abort.  Otherwise we'll end up spinning in
+         * in a tight loop with the lock held.  A similar check is done
+         * in pystate.c tstate_delete_common().  */
+        if (p == prev_p)
+            Py_FatalError("tls find_key: small circular list(!)");
+        prev_p = p;
+        if (p->next == keyhead)
+            Py_FatalError("tls find_key: circular list(!)");
+    }
+    if (value == NULL) {
+        assert(p == NULL);
+        goto Done;
+    }
+    p = (struct key *)malloc(sizeof(struct key));
+    if (p != NULL) {
+        p->id = id;
+        p->key = key;
+        p->value = value;
+        p->next = keyhead;
+        keyhead = p;
+    }
+ Done:
+    PyThread_release_lock(keymutex);
+    return p;
+}
+
+/* Return a new key.  This must be called before any other functions in
+ * this family, and callers must arrange to serialize calls to this
+ * function.  No violations are detected.
+ */
+int
+PyThread_create_key(void)
+{
+    /* All parts of this function are wrong if it's called by multiple
+     * threads simultaneously.
+     */
+    if (keymutex == NULL)
+        keymutex = PyThread_allocate_lock();
+    return ++nkeys;
+}
+
+/* Forget the associations for key across *all* threads. */
+void
+PyThread_delete_key(int key)
+{
+    struct key *p, **q;
+
+    PyThread_acquire_lock(keymutex, 1);
+    q = &keyhead;
+    while ((p = *q) != NULL) {
+        if (p->key == key) {
+            *q = p->next;
+            free((void *)p);
+            /* NB This does *not* free p->value! */
+        }
+        else
+            q = &p->next;
+    }
+    PyThread_release_lock(keymutex);
+}
+
+/* Confusing:  If the current thread has an association for key,
+ * value is ignored, and 0 is returned.  Else an attempt is made to create
+ * an association of key to value for the current thread.  0 is returned
+ * if that succeeds, but -1 is returned if there's not enough memory
+ * to create the association.  value must not be NULL.
+ */
+int
+PyThread_set_key_value(int key, void *value)
+{
+    struct key *p;
+
+    assert(value != NULL);
+    p = find_key(key, value);
+    if (p == NULL)
+        return -1;
+    else
+        return 0;
+}
+
+/* Retrieve the value associated with key in the current thread, or NULL
+ * if the current thread doesn't have an association for key.
+ */
+void *
+PyThread_get_key_value(int key)
+{
+    struct key *p = find_key(key, NULL);
+
+    if (p == NULL)
+        return NULL;
+    else
+        return p->value;
+}
+
+/* Forget the current thread's association for key, if any. */
+void
+PyThread_delete_key_value(int key)
+{
+    long id = PyThread_get_thread_ident();
+    struct key *p, **q;
+
+    PyThread_acquire_lock(keymutex, 1);
+    q = &keyhead;
+    while ((p = *q) != NULL) {
+        if (p->key == key && p->id == id) {
+            *q = p->next;
+            free((void *)p);
+            /* NB This does *not* free p->value! */
+            break;
+        }
+        else
+            q = &p->next;
+    }
+    PyThread_release_lock(keymutex);
+}
+
+/* Forget everything not associated with the current thread id.
+ * This function is called from PyOS_AfterFork().  It is necessary
+ * because other thread ids which were in use at the time of the fork
+ * may be reused for new threads created in the forked process.
+ */
+void
+PyThread_ReInitTLS(void)
+{
+    long id = PyThread_get_thread_ident();
+    struct key *p, **q;
+
+    if (!keymutex)
+        return;
+
+    /* As with interpreter_lock in PyEval_ReInitThreads()
+       we just create a new lock without freeing the old one */
+    keymutex = PyThread_allocate_lock();
+
+    /* Delete all keys which do not match the current thread id */
+    q = &keyhead;
+    while ((p = *q) != NULL) {
+        if (p->id != id) {
+            *q = p->next;
+            free((void *)p);
+            /* NB This does *not* free p->value! */
+        }
+        else
+            q = &p->next;
+    }
+}
+
+#endif  /* !MS_WINDOWS */
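
The comment block in the new pythread.c above documents the per-thread "key" API (create_key, set_key_value, get_key_value, delete_key_value, delete_key). As a rough illustration of those documented semantics only (not of the C implementation itself), the behavior can be modeled in Python as a mapping from (thread id, key) to value, where setting a value that already exists is ignored, much like dict.setdefault()::

    import threading

    _store = {}                  # (thread_id, key) -> value
    _mutex = threading.Lock()    # plays the role of keymutex
    _nkeys = 0

    def create_key():
        # like "return ++nkeys": keys are just increasing integers
        global _nkeys
        _nkeys += 1
        return _nkeys

    def set_key_value(key, value):
        assert value is not None
        with _mutex:
            # an existing mapping wins; the new value is silently ignored
            _store.setdefault((threading.get_ident(), key), value)
        return 0

    def get_key_value(key):
        with _mutex:
            # None stands in for the C code's NULL
            return _store.get((threading.get_ident(), key))

    def delete_key_value(key):
        # forget only the current thread's association for key
        with _mutex:
            _store.pop((threading.get_ident(), key), None)

    def delete_key(key):
        # forget the key's values across *all* threads
        with _mutex:
            for k in [k for k in _store if k[1] == key]:
                del _store[k]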

pypy/module/cpyext/src/thread.c

-#include <Python.h>
-#include "pythread.h"
-
-/* With PYPY_NOT_MAIN_FILE only declarations are imported */
-#define PYPY_NOT_MAIN_FILE
-#include "src/thread.h"
-
-long
-PyThread_get_thread_ident(void)
-{
-    return RPyThreadGetIdent();
-}
-
-PyThread_type_lock
-PyThread_allocate_lock(void)
-{
-    struct RPyOpaque_ThreadLock *lock;
-    lock = malloc(sizeof(struct RPyOpaque_ThreadLock));
-    if (lock == NULL)
-        return NULL;
-
-    if (RPyThreadLockInit(lock) == 0) {
-        free(lock);
-        return NULL;
-    }
-
-    return (PyThread_type_lock)lock;
-}
-
-void
-PyThread_free_lock(PyThread_type_lock lock)
-{
-    struct RPyOpaque_ThreadLock *real_lock = lock;
-    RPyThreadAcquireLock(real_lock, 0);
-    RPyThreadReleaseLock(real_lock);
-    RPyOpaqueDealloc_ThreadLock(real_lock);
-    free(lock);
-}
-
-int
-PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
-{
-    return RPyThreadAcquireLock((struct RPyOpaqueThreadLock*)lock, waitflag);
-}
-
-void
-PyThread_release_lock(PyThread_type_lock lock)
-{
-    RPyThreadReleaseLock((struct RPyOpaqueThreadLock*)lock);
-}
-
-
-/* ------------------------------------------------------------------------
-Per-thread data ("key") support.
-
-Use PyThread_create_key() to create a new key.  This is typically shared
-across threads.
-
-Use PyThread_set_key_value(thekey, value) to associate void* value with
-thekey in the current thread.  Each thread has a distinct mapping of thekey
-to a void* value.  Caution:  if the current thread already has a mapping
-for thekey, value is ignored.
-
-Use PyThread_get_key_value(thekey) to retrieve the void* value associated
-with thekey in the current thread.  This returns NULL if no value is
-associated with thekey in the current thread.
-
-Use PyThread_delete_key_value(thekey) to forget the current thread's associated
-value for thekey.  PyThread_delete_key(thekey) forgets the values associated
-with thekey across *all* threads.
-
-While some of these functions have error-return values, none set any
-Python exception.
-
-None of the functions does memory management on behalf of the void* values.
-You need to allocate and deallocate them yourself.  If the void* values
-happen to be PyObject*, these functions don't do refcount operations on
-them either.
-
-The GIL does not need to be held when calling these functions; they supply
-their own locking.  This isn't true of PyThread_create_key(), though (see
-next paragraph).
-
-There's a hidden assumption that PyThread_create_key() will be called before
-any of the other functions are called.  There's also a hidden assumption
-that calls to PyThread_create_key() are serialized externally.
------------------------------------------------------------------------- */
-
-#ifdef MS_WINDOWS
-#include <windows.h>
-
-/* use native Windows TLS functions */
-#define Py_HAVE_NATIVE_TLS
-
-int
-PyThread_create_key(void)
-{
-    return (int) TlsAlloc();
-}
-
-void
-PyThread_delete_key(int key)
-{
-    TlsFree(key);
-}
-
-/* We must be careful to emulate the strange semantics implemented in thread.c,
- * where the value is only set if it hasn't been set before.
- */
-int
-PyThread_set_key_value(int key, void *value)
-{
-    BOOL ok;
-    void *oldvalue;
-
-    assert(value != NULL);
-    oldvalue = TlsGetValue(key);
-    if (oldvalue != NULL)
-        /* ignore value if already set */
-        return 0;
-    ok = TlsSetValue(key, value);
-    if (!ok)
-        return -1;
-    return 0;
-}
-
-void *
-PyThread_get_key_value(int key)
-{
-    /* because TLS is used in the Py_END_ALLOW_THREAD macro,
-     * it is necessary to preserve the windows error state, because
-     * it is assumed to be preserved across the call to the macro.
-     * Ideally, the macro should be fixed, but it is simpler to
-     * do it here.
-     */
-    DWORD error = GetLastError();
-    void *result = TlsGetValue(key);
-    SetLastError(error);
-    return result;
-}
-
-void
-PyThread_delete_key_value(int key)
-{
-    /* NULL is used as "key missing", and it is also the default
-     * given by TlsGetValue() if nothing has been set yet.
-     */
-    TlsSetValue(key, NULL);
-}
-
-/* reinitialization of TLS is not necessary after fork when using
- * the native TLS functions.  And forking isn't supported on Windows either.
- */
-void
-PyThread_ReInitTLS(void)
-{}
-
-#else  /* MS_WINDOWS */
-
-/* A singly-linked list of struct key objects remembers all the key->value
- * associations.  File static keyhead heads the list.  keymutex is used
- * to enforce exclusion internally.
- */
-struct key {
-    /* Next record in the list, or NULL if this is the last record. */
-    struct key *next;
-
-    /* The thread id, according to PyThread_get_thread_ident(). */
-    long id;
-
-    /* The key and its associated value. */
-    int key;
-    void *value;
-};
-
-static struct key *keyhead = NULL;
-static PyThread_type_lock keymutex = NULL;
-static int nkeys = 0;  /* PyThread_create_key() hands out nkeys+1 next */
-
-/* Internal helper.
- * If the current thread has a mapping for key, the appropriate struct key*
- * is returned.  NB:  value is ignored in this case!
- * If there is no mapping for key in the current thread, then:
- *     If value is NULL, NULL is returned.
- *     Else a mapping of key to value is created for the current thread,
- *     and a pointer to a new struct key* is returned; except that if
- *     malloc() can't find room for a new struct key*, NULL is returned.
- * So when value==NULL, this acts like a pure lookup routine, and when
- * value!=NULL, this acts like dict.setdefault(), returning an existing
- * mapping if one exists, else creating a new mapping.
- *
- * Caution:  this used to be too clever, trying to hold keymutex only
- * around the "p->next = keyhead; keyhead = p" pair.  That allowed
- * another thread to mutate the list, via key deletion, concurrent with
- * find_key() crawling over the list.  Hilarity ensued.  For example, when
- * the for-loop here does "p = p->next", p could end up pointing at a
- * record that PyThread_delete_key_value() was concurrently free()'ing.
- * That could lead to anything, from failing to find a key that exists, to
- * segfaults.  Now we lock the whole routine.
- */
-static struct key *
-find_key(int key, void *value)
-{
-    struct key *p, *prev_p;
-    long id = PyThread_get_thread_ident();
-
-    if (!keymutex)
-        return NULL;
-    PyThread_acquire_lock(keymutex, 1);
-    prev_p = NULL;
-    for (p = keyhead; p != NULL; p = p->next) {
-        if (p->id == id && p->key == key)
-            goto Done;
-        /* Sanity check.  These states should never happen but if
-         * they do we must abort.  Otherwise we'll end up spinning in
-         * in a tight loop with the lock held.  A similar check is done
-         * in pystate.c tstate_delete_common().  */
-        if (p == prev_p)
-            Py_FatalError("tls find_key: small circular list(!)");
-        prev_p = p;
-        if (p->next == keyhead)
-            Py_FatalError("tls find_key: circular list(!)");
-    }
-    if (value == NULL) {
-        assert(p == NULL);
-        goto Done;
-    }
-    p = (struct key *)malloc(sizeof(struct key));
-    if (p != NULL) {
-        p->id = id;
-        p->key = key;
-        p->value = value;
-        p->next = keyhead;
-        keyhead = p;
-    }
- Done:
-    PyThread_release_lock(keymutex);
-    return p;
-}
-
-/* Return a new key.  This must be called before any other functions in
- * this family, and callers must arrange to serialize calls to this
- * function.  No violations are detected.
- */
-int
-PyThread_create_key(void)
-{
-    /* All parts of this function are wrong if it's called by multiple
-     * threads simultaneously.
-     */
-    if (keymutex == NULL)
-        keymutex = PyThread_allocate_lock();
-    return ++nkeys;
-}
-
-/* Forget the associations for key across *all* threads. */
-void
-PyThread_delete_key(int key)
-{
-    struct key *p, **q;
-
-    PyThread_acquire_lock(keymutex, 1);
-    q = &keyhead;
-    while ((p = *q) != NULL) {
-        if (p->key == key) {
-            *q = p->next;
-            free((void *)p);
-            /* NB This does *not* free p->value! */
-        }
-        else
-            q = &p->next;
-    }
-    PyThread_release_lock(keymutex);
-}
-
-/* Confusing:  If the current thread has an association for key,
- * value is ignored, and 0 is returned.  Else an attempt is made to create
- * an association of key to value for the current thread.  0 is returned
- * if that succeeds, but -1 is returned if there's not enough memory
- * to create the association.  value must not be NULL.
- */
-int
-PyThread_set_key_value(int key, void *value)
-{
-    struct key *p;
-
-    assert(value != NULL);
-    p = find_key(key, value);
-    if (p == NULL)
-        return -1;
-    else
-        return 0;
-}
-
-/* Retrieve the value associated with key in the current thread, or NULL
- * if the current thread doesn't have an association for key.
- */
-void *
-PyThread_get_key_value(int key)
-{
-    struct key *p = find_key(key, NULL);
-
-    if (p == NULL)
-        return NULL;
-    else
-        return p->value;
-}
-
-/* Forget the current thread's association for key, if any. */
-void
-PyThread_delete_key_value(int key)
-{
-    long id = PyThread_get_thread_ident();
-    struct key *p, **q;
-
-    PyThread_acquire_lock(keymutex, 1);
-    q = &keyhead;
-    while ((p = *q) != NULL) {
-        if (p->key == key && p->id == id) {
-            *q = p->next;
-            free((void *)p);
-            /* NB This does *not* free p->value! */
-            break;
-        }
-        else
-            q = &p->next;
-    }
-    PyThread_release_lock(keymutex);
-}
-
-/* Forget everything not associated with the current thread id.
- * This function is called from PyOS_AfterFork().  It is necessary
- * because other thread ids which were in use at the time of the fork
- * may be reused for new threads created in the forked process.
- */
-void
-PyThread_ReInitTLS(void)
-{
-    long id = PyThread_get_thread_ident();
-    struct key *p, **q;
-
-    if (!keymutex)
-        return;
-
-    /* As with interpreter_lock in PyEval_ReInitThreads()
-       we just create a new lock without freeing the old one */
-    keymutex = PyThread_allocate_lock();
-
-    /* Delete all keys which do not match the current thread id */
-    q = &keyhead;
-    while ((p = *q) != NULL) {
-        if (p->id != id) {
-            *q = p->next;
-            free((void *)p);
-            /* NB This does *not* free p->value! */
-        }
-        else
-            q = &p->next;
-    }
-}
-
-#endif  /* !MS_WINDOWS */

pypy/module/signal/interp_signal.py

 if sys.platform != 'win32':
     includes.append('sys/time.h')
 
+cdir = py.path.local(autopath.pypydir).join('translator', 'c')
+
 eci = ExternalCompilationInfo(
     includes = includes,
-    separate_module_sources = ['#include <src/signals.h>'],
-    include_dirs = [str(py.path.local(autopath.pypydir).join('translator', 'c'))],
+    separate_module_files = [cdir / 'src' / 'signals.c'],
+    include_dirs = [str(cdir)],
     export_symbols = ['pypysig_poll', 'pypysig_default',
                       'pypysig_ignore', 'pypysig_setflag',
                       'pypysig_reinstall',

pypy/module/thread/ll_thread.py

 class error(Exception):
     pass
 
+pypydir = py.path.local(autopath.pypydir)
+translator_c_dir = pypydir / 'translator' / 'c'
+
 eci = ExternalCompilationInfo(
     includes = ['src/thread.h'],
-    separate_module_sources = [''],
-    include_dirs = [str(py.path.local(autopath.pypydir).join('translator', 'c'))],
+    separate_module_files = [translator_c_dir / 'src' / 'thread.c'],
+    include_dirs = [translator_c_dir],
     export_symbols = ['RPyThreadGetIdent', 'RPyThreadLockInit',
                       'RPyThreadAcquireLock', 'RPyThreadAcquireLockTimed',
                       'RPyThreadReleaseLock', 'RPyGilAllocate',

pypy/rlib/_rffi_stacklet.py

 eci = ExternalCompilationInfo(
     include_dirs = [cdir],
     includes = ['src/stacklet/stacklet.h'],
-    separate_module_sources = ['#include "src/stacklet/stacklet.c"\n'],
+    separate_module_files = [cdir / 'src' / 'stacklet' / 'stacklet.c'],
 )
 if 'masm' in dir(eci.platform): # Microsoft compiler
     if is_emulated_long:

pypy/rlib/rstack.py

 
 import inspect
 
+import py
+
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.rarithmetic import r_uint
 from pypy.rlib import rgc
 from pypy.rpython.lltypesystem import lltype, rffi
 from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rpython.controllerentry import Controller, SomeControlledInstance
+from pypy.tool.autopath import pypydir
 from pypy.translator.tool.cbuild import ExternalCompilationInfo
 
 # ____________________________________________________________
 
-compilation_info = ExternalCompilationInfo(includes=['src/stack.h'])
+srcdir = py.path.local(pypydir) / 'translator' / 'c' / 'src'
+compilation_info = ExternalCompilationInfo(
+        includes=['src/stack.h'],
+        separate_module_files=[srcdir / 'stack.c', srcdir / 'threadlocal.c'])
 
 def llexternal(name, args, res, _callable=None):
     return rffi.llexternal(name, args, res, compilation_info=compilation_info,

pypy/rpython/module/ll_strtod.py

     _compilation_info_ = ExternalCompilationInfo(
         includes = ['src/ll_strtod.h'],
         include_dirs = [str(py.path.local(pypydir).join('translator', 'c'))],
-        separate_module_sources = ['#include <src/ll_strtod.h>'],
+        separate_module_sources = ['#include <src/ll_strtod.c>'],
         export_symbols = ['LL_strtod_formatd', 'LL_strtod_parts_to_float'],
     )
 

pypy/rpython/tool/rffi_platform.py

 """
 
 def run_example_code(filepath, eci, ignore_errors=False):
-    eci = eci.convert_sources_to_files(being_main=True)
+    eci = eci.convert_sources_to_files()
     files = [filepath]
     output = build_executable_cache(files, eci, ignore_errors=ignore_errors)
     section = None

pypy/translator/c/funcgen.py

 
     def OP_GETSUBSTRUCT(self, op):
         RESULT = self.lltypemap(op.result).TO
-        if isinstance(RESULT, FixedSizeArray):
+        if (isinstance(RESULT, FixedSizeArray) or
+                (isinstance(RESULT, Array) and barebonearray(RESULT))):
             return self.OP_GETFIELD(op, ampersand='')
         else:
             return self.OP_GETFIELD(op, ampersand='&')
         self.db.instrument_ncounter = max(self.db.instrument_ncounter,
                                           counter_label+1)
         counter_label = self.expr(op.args[1])
-        return 'INSTRUMENT_COUNT(%s);' % counter_label
+        return 'PYPY_INSTRUMENT_COUNT(%s);' % counter_label
             
     def OP_IS_EARLY_CONSTANT(self, op):
         return '%s = 0; /* IS_EARLY_CONSTANT */' % (self.expr(op.result),)

pypy/translator/c/gc.py

 
         eci = eci.merge(ExternalCompilationInfo(
             pre_include_bits=pre_include_bits,
-            post_include_bits=['#define USING_BOEHM_GC'],
+            # The following define is required by the thread module,
+            # See module/thread/test/test_ll_thread.py
+            compile_extra=['-DPYPY_USING_BOEHM_GC'],
             ))
 
         return eci
     def compilation_info(self):
         eci = BasicGcPolicy.compilation_info(self)
         eci = eci.merge(ExternalCompilationInfo(
-            post_include_bits=['#define USING_NO_GC_AT_ALL'],
+            post_include_bits=['#define PYPY_USING_NO_GC_AT_ALL'],
             ))
         return eci
 

pypy/translator/c/genc.py

 import autopath
+import contextlib
 import py
 import sys, os
 from pypy.rlib import exports
         else:
             defines['PYPY_STANDALONE'] = db.get(pf)
             if self.config.translation.instrument:
-                defines['INSTRUMENT'] = 1
+                defines['PYPY_INSTRUMENT'] = 1
             if CBuilder.have___thread:
                 if not self.config.translation.no__thread:
                     defines['USE___THREAD'] = 1
             ('clean_noprof', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES) $(ASMFILES)'),
             ('debug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT" debug_target'),
             ('debug_exc', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DDO_LOG_EXC" debug_target'),
-            ('debug_mem', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DTRIVIAL_MALLOC_DEBUG" debug_target'),
-            ('no_obmalloc', '', '$(MAKE) CFLAGS="-g -O2 -DRPY_ASSERT -DNO_OBMALLOC" $(TARGET)'),
-            ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DLINUXMEMCHK" debug_target'),
+            ('debug_mem', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_TRIVIAL_MALLOC" debug_target'),
+            ('no_obmalloc', '', '$(MAKE) CFLAGS="-g -O2 -DRPY_ASSERT -DPYPY_NO_OBMALLOC" $(TARGET)'),
+            ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_LINUXMEMCHK" debug_target'),
             ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'),
             ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'),
             ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'),
 class SourceGenerator:
     one_source_file = True
 
-    def __init__(self, database, preimplementationlines=[]):
+    def __init__(self, database):
         self.database = database
-        self.preimpl = preimplementationlines
         self.extrafiles = []
         self.path = None
         self.namespace = NameManager()
                 funcnodes.append(node)
             else:
                 othernodes.append(node)
-        # for now, only split for stand-alone programs.
-        #if self.database.standalone:
         if split:
             self.one_source_file = False
         self.funcnodes = funcnodes
                         return relpypath.replace('.py', '.c')
             return None
         if hasattr(node.obj, 'graph'):
+            # Regular RPython functions
             name = invent_nice_name(node.obj.graph)
             if name is not None:
                 return name
         elif node._funccodegen_owner is not None:
-            name = invent_nice_name(node._funccodegen_owner.graph)
+            # Data nodes that belong to a known function
+            graph = getattr(node._funccodegen_owner, 'graph', None)
+            name = invent_nice_name(graph)
             if name is not None:
                 return "data_" + name
         return basecname
 
         # produce a sequence of nodes, grouped into files
         # which have no more than SPLIT_CRITERIA lines
-        for basecname in nodes_by_base_cfile:
+        for basecname in sorted(nodes_by_base_cfile):
             iternodes = iter(nodes_by_base_cfile[basecname])
             done = [False]
             def subiter():
             while not done[0]:
                 yield self.uniquecname(basecname), subiter()
 
+    @contextlib.contextmanager
+    def write_on_included_file(self, f, name):
+        fi = self.makefile(name)
+        print >> f, '#include "%s"' % name
+        yield fi
+        fi.close()
+
+    @contextlib.contextmanager
+    def write_on_maybe_separate_source(self, f, name):
+        print >> f, '/* %s */' % name
+        if self.one_source_file:
+            yield f
+        else:
+            fi = self.makefile(name)
+            yield fi
+            fi.close()
+
     def gen_readable_parts_of_source(self, f):
         split_criteria_big = SPLIT_CRITERIA
         if py.std.sys.platform != "win32":
                 pass    # XXX gcc uses toooooons of memory???
             else:
                 split_criteria_big = SPLIT_CRITERIA * 4 
-        if self.one_source_file:
-            return gen_readable_parts_of_main_c_file(f, self.database,
-                                                     self.preimpl)
+
         #
         # All declarations
         #
-        database = self.database
-        structdeflist = database.getstructdeflist()
-        name = 'structdef.h'
-        fi = self.makefile(name)
-        print >> f, '#include "%s"' % name
-        gen_structdef(fi, database)
-        fi.close()
-        name = 'forwarddecl.h'
-        fi = self.makefile(name)
-        print >> f, '#include "%s"' % name
-        gen_forwarddecl(fi, database)
-        fi.close()
+        with self.write_on_included_file(f, 'structdef.h') as fi:
+            gen_structdef(fi, self.database)
+        with self.write_on_included_file(f, 'forwarddecl.h') as fi:
+            gen_forwarddecl(fi, self.database)
+        with self.write_on_included_file(f, 'preimpl.h') as fi:
+            gen_preimpl(fi, self.database)
 
         #
         # Implementation of functions and global structures and arrays
         print >> f, '/***********************************************************/'
         print >> f, '/***  Implementations                                    ***/'
         print >> f
-        for line in self.preimpl:
-            print >> f, line
+
+        print >> f, '#define PYPY_FILE_NAME "%s"' % os.path.basename(f.name)
         print >> f, '#include "src/g_include.h"'
         print >> f
-        name = self.uniquecname('structimpl.c')
-        print >> f, '/* %s */' % name
-        fc = self.makefile(name)
-        print >> fc, '/***********************************************************/'
-        print >> fc, '/***  Structure Implementations                          ***/'
-        print >> fc
-        print >> fc, '#define PYPY_NOT_MAIN_FILE'
-        print >> fc, '#include "common_header.h"'
-        print >> fc, '#include "structdef.h"'
-        print >> fc, '#include "forwarddecl.h"'
-        print >> fc
-        print >> fc, '#include "src/g_include.h"'
-        print >> fc
-        print >> fc, MARKER
-
-        print >> fc, '/***********************************************************/'
-        fc.close()
 
         nextralines = 11 + 1
         for name, nodeiter in self.splitnodesimpl('nonfuncnodes.c',
                                                    self.othernodes,
                                                    nextralines, 1):
-            print >> f, '/* %s */' % name
-            fc = self.makefile(name)
-            print >> fc, '/***********************************************************/'
-            print >> fc, '/***  Non-function Implementations                       ***/'
-            print >> fc
-            print >> fc, '#define PYPY_NOT_MAIN_FILE'
-            print >> fc, '#include "common_header.h"'
-            print >> fc, '#include "structdef.h"'
-            print >> fc, '#include "forwarddecl.h"'
-            print >> fc
-            print >> fc, '#include "src/g_include.h"'
-            print >> fc
-            print >> fc, MARKER
-            for node, impl in nodeiter:
-                print >> fc, '\n'.join(impl)
+            with self.write_on_maybe_separate_source(f, name) as fc:
+                if fc is not f:
+                    print >> fc, '/***********************************************************/'
+                    print >> fc, '/***  Non-function Implementations                       ***/'
+                    print >> fc
+                    print >> fc, '#include "common_header.h"'
+                    print >> fc, '#include "structdef.h"'
+                    print >> fc, '#include "forwarddecl.h"'
+                    print >> fc, '#include "preimpl.h"'
+                    print >> fc
+                    print >> fc, '#include "src/g_include.h"'
+                    print >> fc
                 print >> fc, MARKER
-            print >> fc, '/***********************************************************/'
-            fc.close()
+                for node, impl in nodeiter:
+                    print >> fc, '\n'.join(impl)
+                    print >> fc, MARKER
+                print >> fc, '/***********************************************************/'
 
-        nextralines = 8 + len(self.preimpl) + 4 + 1
+        nextralines = 12
         for name, nodeiter in self.splitnodesimpl('implement.c',
                                                    self.funcnodes,
                                                    nextralines, 1,
                                                    split_criteria_big):
-            print >> f, '/* %s */' % name
-            fc = self.makefile(name)
-            print >> fc, '/***********************************************************/'
-            print >> fc, '/***  Implementations                                    ***/'
-            print >> fc
-            print >> fc, '#define PYPY_NOT_MAIN_FILE'
-            print >> fc, '#define PYPY_FILE_NAME "%s"' % name
-            print >> fc, '#include "common_header.h"'
-            print >> fc, '#include "structdef.h"'
-            print >> fc, '#include "forwarddecl.h"'
-            print >> fc
-            for line in self.preimpl:
-                print >> fc, line
-            print >> fc
-            print >> fc, '#include "src/g_include.h"'
-            print >> fc
-            print >> fc, MARKER
-            for node, impl in nodeiter:
-                print >> fc, '\n'.join(impl)
+            with self.write_on_maybe_separate_source(f, name) as fc:
+                if fc is not f:
+                    print >> fc, '/***********************************************************/'
+                    print >> fc, '/***  Implementations                                    ***/'
+                    print >> fc
+                    print >> fc, '#define PYPY_FILE_NAME "%s"' % name
+                    print >> fc, '#include "common_header.h"'
+                    print >> fc, '#include "structdef.h"'
+                    print >> fc, '#include "forwarddecl.h"'
+                    print >> fc, '#include "preimpl.h"'
+                    print >> fc, '#include "src/g_include.h"'
+                    print >> fc
                 print >> fc, MARKER
-            print >> fc, '/***********************************************************/'
-            fc.close()
+                for node, impl in nodeiter:
+                    print >> fc, '\n'.join(impl)
+                    print >> fc, MARKER
+                print >> fc, '/***********************************************************/'
         print >> f
 
 
         for line in node.forward_declaration():
             print >> f, line
 
-# this function acts as the fallback for small sources for now.
-# Maybe we drop this completely if source splitting is the way
-# to go. Currently, I'm quite fine with keeping a working fallback.
-# XXX but we need to reduce code duplication.
-
-def gen_readable_parts_of_main_c_file(f, database, preimplementationlines=[]):
-    #
-    # All declarations
-    #
-    print >> f
-    gen_structdef(f, database)
-    print >> f
-    gen_forwarddecl(f, database)
-
-    #
-    # Implementation of functions and global structures and arrays
-    #
-    print >> f
-    print >> f, '/***********************************************************/'
-    print >> f, '/***  Implementations                                    ***/'
-    print >> f
-    print >> f, '#define PYPY_FILE_NAME "%s"' % os.path.basename(f.name)
+def gen_preimpl(f, database):
+    if database.translator is None or database.translator.rtyper is None:
+        return
+    preimplementationlines = pre_include_code_lines(
+        database, database.translator.rtyper)
     for line in preimplementationlines:
         print >> f, line
-    print >> f, '#include "src/g_include.h"'
-    print >> f
-    blank = True
-    graphs = database.all_graphs()
-    database.gctransformer.prepare_inline_helpers(graphs)
-    for node in database.globalcontainers():
-        if blank:
-            print >> f
-            blank = False
-        for line in node.implementation():
-            print >> f, line
-            blank = True
 
 def gen_startupcode(f, database):
     # generate the start-up code and put it into a function
 def add_extra_files(eci):
     srcdir = py.path.local(autopath.pypydir).join('translator', 'c', 'src')
     files = [
+        srcdir / 'entrypoint.c',       # ifdef PYPY_STANDALONE
+        srcdir / 'allocator.c',        # ifdef PYPY_STANDALONE
+        srcdir / 'mem.c',
+        srcdir / 'exception.c',
+        srcdir / 'rtyper.c',           # ifdef HAVE_RTYPER
+        srcdir / 'support.c',
         srcdir / 'profiling.c',
         srcdir / 'debug_print.c',
+        srcdir / 'debug_traceback.c',  # ifdef HAVE_RTYPER
+        srcdir / 'asm.c',
+        srcdir / 'instrument.c',
+        srcdir / 'int.c',
     ]
     if _CYGWIN:
         files.append(srcdir / 'cygwin_wait.c')
 
     fi.close()
 
-    if database.translator is None or database.translator.rtyper is None:
-        preimplementationlines = []
-    else:
-        preimplementationlines = list(
-            pre_include_code_lines(database, database.translator.rtyper))
-
     #
     # 1) All declarations
     # 2) Implementation of functions and global structures and arrays
     #
-    sg = SourceGenerator(database, preimplementationlines)
+    sg = SourceGenerator(database)
     sg.set_strategy(targetdir, split)
-    if split:
-        database.prepare_inline_helpers()
+    database.prepare_inline_helpers()
     sg.gen_readable_parts_of_source(f)
 
     gen_startupcode(f, database)
     f.close()
 
-    if 'INSTRUMENT' in defines:
+    if 'PYPY_INSTRUMENT' in defines:
         fi = incfilename.open('a')
         n = database.instrument_ncounter
-        print >>fi, "#define INSTRUMENT_NCOUNTER %d" % n
+        print >>fi, "#define PYPY_INSTRUMENT_NCOUNTER %d" % n
         fi.close()
 
     eci = add_extra_files(eci)
-    eci = eci.convert_sources_to_files(being_main=True)
+    eci = eci.convert_sources_to_files()
     files, eci = eci.get_module_files()
     return eci, filename, sg.getextrafiles() + list(files)

pypy/translator/c/node.py

 
     def initializationexpr(self, decoration=''):
         T = self.getTYPE()
-        yield 'RPyOpaque_INITEXPR_%s' % (T.tag,)
+        raise NotImplementedError(
+            'seeing an unexpected prebuilt object: %s' % (T.tag,))
 
     def startupcode(self):
         T = self.getTYPE()

pypy/translator/c/src/allocator.c

+/* allocation functions */
+#include "common_header.h"
+#ifdef PYPY_STANDALONE
+#include <malloc.h>
+#include <stdlib.h>
+
+#if defined(PYPY_USE_TRIVIAL_MALLOC)
+  void *PyObject_Malloc(size_t n) { return malloc(n); }
+  void *PyObject_Realloc(void *p, size_t n) { return realloc(p, n); }
+  void PyObject_Free(void *p) { if (p) { *((int*)p) = 0xDDDDDDDD; } free(p); }
+
+#elif defined(PYPY_USE_LINUXMEMCHK)
+#  include "linuxmemchk.c"
+
+#elif defined(PYPY_NO_OBMALLOC)
+  void *PyObject_Malloc(size_t n) { return malloc(n); }
+  void *PyObject_Realloc(void *p, size_t n) { return realloc(p, n); }
+  void PyObject_Free(void *p) { free(p); }
+
+#else
+#  ifndef WITH_PYMALLOC
+#    define WITH_PYMALLOC
+#  endif
+/* The same obmalloc as CPython */
+#  include "src/obmalloc.c"
+
+#endif
+
+#endif  /* PYPY_STANDALONE */

pypy/translator/c/src/allocator.h

-
+#ifdef PYPY_STANDALONE 
 /* allocation functions prototypes */
 void *PyObject_Malloc(size_t n);
 void *PyObject_Realloc(void *p, size_t n);
 void PyObject_Free(void *p);
 
-
-#ifndef PYPY_NOT_MAIN_FILE
-
-#if defined(TRIVIAL_MALLOC_DEBUG)
-  void *PyObject_Malloc(size_t n) { return malloc(n); }
-  void *PyObject_Realloc(void *p, size_t n) { return realloc(p, n); }
-  void PyObject_Free(void *p) { if (p) { *((int*)p) = 0xDDDDDDDD; } free(p); }
-
-#elif defined(LINUXMEMCHK)
-#  include "linuxmemchk.c"
-
-#elif defined(NO_OBMALLOC)
-  void *PyObject_Malloc(size_t n) { return malloc(n); }
-  void *PyObject_Realloc(void *p, size_t n) { return realloc(p, n); }
-  void PyObject_Free(void *p) { free(p); }
-
-#else
-#  ifndef WITH_PYMALLOC
-#    define WITH_PYMALLOC
-#  endif
-#  include "obmalloc.c"
-
-#endif
-
-#endif
+#endif  /* PYPY_STANDALONE */

pypy/translator/c/src/asm.c

+/* optional assembler bits */
+#if defined(__GNUC__) && defined(__i386__)
+#  include "src/asm_gcc_x86.c"
+#endif
+
+#if defined(__GNUC__) && defined(__amd64__)
+/* No implementation for the moment. */
+/* #  include "src/asm_gcc_x86_64.c" */
+#endif
+
+#if defined(__GNUC__) && defined(__ppc__)
+#  include "src/asm_ppc.c"
+#endif
+
+#if defined(MS_WINDOWS) && defined(_MSC_VER)
+#  include "src/asm_msvc.c"
+#endif

pypy/translator/c/src/asm_gcc_x86.c

+/* This optional file only works for GCC on an i386.
+ * It replaces some complex macros with native assembler instructions.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#  if 0   /* disabled */
+void op_int_overflowed(void)
+{
+  FAIL_OVF("integer operation");
+}
+#  endif
+
+#  ifdef PYPY_X86_CHECK_SSE2
+void pypy_x86_check_sse2(void)
+{
+    //Read the CPU features.
+    int features;
+    asm("movl $1, %%eax\n"
+        "pushl %%ebx\n"
+        "cpuid\n"
+        "popl %%ebx\n"
+        "movl %%edx, %0"
+        : "=g"(features) : : "eax", "edx", "ecx");
+    
+    //Check bits 25 and 26, this indicates SSE2 support
+    if (((features & (1 << 25)) == 0) || ((features & (1 << 26)) == 0))
+    {
+        fprintf(stderr, "Old CPU with no SSE2 support, cannot continue.\n"
+                        "You need to re-translate with "
+                        "'--jit-backend=x86-without-sse2'\n");
+        abort();
+    }
+}
+#  endif

pypy/translator/c/src/asm_gcc_x86.h

 #define PYPY_X86_CHECK_SSE2_DEFINED
 extern void pypy_x86_check_sse2(void);
 #endif
-
-
-/* implementations */
-
-#ifndef PYPY_NOT_MAIN_FILE
-
-#  if 0   /* disabled */
-void op_int_overflowed(void)
-{
-  FAIL_OVF("integer operation");
-}
-#  endif
-
-#  ifdef PYPY_X86_CHECK_SSE2
-void pypy_x86_check_sse2(void)
-{
-    //Read the CPU features.
-    int features;
-    asm("movl $1, %%eax\n"
-        "pushl %%ebx\n"
-        "cpuid\n"
-        "popl %%ebx\n"
-        "movl %%edx, %0"
-        : "=g"(features) : : "eax", "edx", "ecx");
-    
-    //Check bits 25 and 26, this indicates SSE2 support
-    if (((features & (1 << 25)) == 0) || ((features & (1 << 26)) == 0))
-    {
-        fprintf(stderr, "Old CPU with no SSE2 support, cannot continue.\n"
-                        "You need to re-translate with "
-                        "'--jit-backend=x86-without-sse2'\n");
-        abort();
-    }
-}
-#  endif
-
-#endif

pypy/translator/c/src/asm_msvc.c

+#ifdef PYPY_X86_CHECK_SSE2
+#include <intrin.h>
+void pypy_x86_check_sse2(void)
+{
+    int features;
+    int CPUInfo[4];
+    CPUInfo[3] = 0;
+    __cpuid(CPUInfo, 1);
+    features = CPUInfo[3];
+
+    //Check bits 25 and 26, this indicates SSE2 support
+    if (((features & (1 << 25)) == 0) || ((features & (1 << 26)) == 0))
+    {
+        fprintf(stderr, "Old CPU with no SSE2 support, cannot continue.\n"
+                        "You need to re-translate with "
+                        "'--jit-backend=x86-without-sse2'\n");
+        abort();
+    }
+}
+#endif

pypy/translator/c/src/asm_msvc.h

-
 #ifdef PYPY_X86_CHECK_SSE2
 #define PYPY_X86_CHECK_SSE2_DEFINED
 extern void pypy_x86_check_sse2(void);
 #endif
-
-
-/* implementations */
-
-#ifndef PYPY_NOT_MAIN_FILE
-#ifdef PYPY_X86_CHECK_SSE2
-#include <intrin.h>
-void pypy_x86_check_sse2(void)
-{
-    int features;
-    int CPUInfo[4];
-    CPUInfo[3] = 0;
-    __cpuid(CPUInfo, 1);
-    features = CPUInfo[3];
-
-    //Check bits 25 and 26, this indicates SSE2 support
-    if (((features & (1 << 25)) == 0) || ((features & (1 << 26)) == 0))
-    {
-        fprintf(stderr, "Old CPU with no SSE2 support, cannot continue.\n"
-                        "You need to re-translate with "
-                        "'--jit-backend=x86-without-sse2'\n");
-        abort();
-    }
-}
-#endif
-#endif

pypy/translator/c/src/asm_ppc.c

+#include "src/asm_ppc.h"
+
+#define __dcbst(base, index)    \
+  __asm__ ("dcbst %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory")
+#define __icbi(base, index)    \
+  __asm__ ("icbi %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory")
+#define __sync() __asm__ volatile ("sync")
+#define __isync()       \
+  __asm__ volatile ("isync")
+
+void
+LL_flush_icache(long base, long size)
+{
+	long i;
+
+	for (i = 0; i < size; i += 32){
+		__dcbst(base, i);
+	}
+	__sync();
+	for (i = 0; i < size; i += 32){
+		__icbi(base, i);
+	}
+	__isync();
+}

pypy/translator/c/src/asm_ppc.h

-
 void LL_flush_icache(long base, long size);
-
-#ifndef PYPY_NOT_MAIN_FILE
-
-#define __dcbst(base, index)    \
-  __asm__ ("dcbst %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory")
-#define __icbi(base, index)    \
-  __asm__ ("icbi %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory")
-#define __sync() __asm__ volatile ("sync")
-#define __isync()       \
-  __asm__ volatile ("isync")
-
-void
-LL_flush_icache(long base, long size)
-{
-	long i;
-
-	for (i = 0; i < size; i += 32){
-		__dcbst(base, i);
-	}
-	__sync();
-	for (i = 0; i < size; i += 32){
-		__icbi(base, i);
-	}
-	__isync();
-}
-
-#endif

pypy/translator/c/src/debug_alloc.h

-/**************************************************************/
-/***  tracking raw mallocs and frees for debugging          ***/
-
-#ifndef RPY_ASSERT
-
-#  define OP_TRACK_ALLOC_START(addr, r)   /* nothing */
-#  define OP_TRACK_ALLOC_STOP(addr, r)    /* nothing */
-
-#else   /* ifdef RPY_ASSERT */
-
-#  define OP_TRACK_ALLOC_START(addr, r)  pypy_debug_alloc_start(addr, \
-                                                                __FUNCTION__)
-#  define OP_TRACK_ALLOC_STOP(addr, r)   pypy_debug_alloc_stop(addr)
-
-void pypy_debug_alloc_start(void*, const char*);
-void pypy_debug_alloc_stop(void*);
-void pypy_debug_alloc_results(void);
-
-/************************************************************/
-
-
-#ifndef PYPY_NOT_MAIN_FILE
-
-struct pypy_debug_alloc_s {
-  struct pypy_debug_alloc_s *next;
-  void *addr;
-  const char *funcname;
-};
-
-static struct pypy_debug_alloc_s *pypy_debug_alloc_list = NULL;
-
-void pypy_debug_alloc_start(void *addr, const char *funcname)
-{
-  struct pypy_debug_alloc_s *p = malloc(sizeof(struct pypy_debug_alloc_s));
-  RPyAssert(p, "out of memory");
-  p->next = pypy_debug_alloc_list;
-  p->addr = addr;
-  p->funcname = funcname;
-  pypy_debug_alloc_list = p;
-}
-
-void pypy_debug_alloc_stop(void *addr)
-{
-  struct pypy_debug_alloc_s **p;
-  for (p = &pypy_debug_alloc_list; *p; p = &((*p)->next))
-    if ((*p)->addr == addr)
-      {
-        struct pypy_debug_alloc_s *dying;
-        dying = *p;
-        *p = dying->next;
-        free(dying);
-        return;
-      }
-  RPyAssert(0, "free() of a never-malloc()ed object");
-}
-
-void pypy_debug_alloc_results(void)
-{
-  long count = 0;
-  struct pypy_debug_alloc_s *p;
-  for (p = pypy_debug_alloc_list; p; p = p->next)
-    count++;
-  if (count > 0)
-    {
-      char *env = getenv("PYPY_ALLOC");
-      fprintf(stderr, "debug_alloc.h: %ld mallocs left", count);
-      if (env && *env)
-        {
-          fprintf(stderr, " (most recent first):\n");
-          for (p = pypy_debug_alloc_list; p; p = p->next)
-            fprintf(stderr, "    %p  %s\n", p->addr, p->funcname);
-        }
-      else
-        fprintf(stderr, " (use PYPY_ALLOC=1 to see the list)\n");
-    }
-}
-
-#endif
-
-
-#endif  /* RPY_ASSERT */

pypy/translator/c/src/debug_print.c

-#define PYPY_NOT_MAIN_FILE
-
 #include <string.h>
 #include <stddef.h>
 #include <stdlib.h>

pypy/translator/c/src/debug_print.h

 /************************************************************/
- /***  C header subsection: debug_print & related tools    ***/
+/***  C header subsection: debug_print & related tools    ***/
+
+#include <stdio.h>
 
 /* values of the PYPYLOG environment variable:
    ("top-level" debug_prints means not between debug_start and debug_stop)

pypy/translator/c/src/debug_traceback.c

+#include "common_header.h"
+#include "structdef.h"
+#include "forwarddecl.h"
+#include "preimpl.h"
+#include "src/debug_traceback.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int pypydtcount = 0;
+struct pypydtentry_s pypy_debug_tracebacks[PYPY_DEBUG_TRACEBACK_DEPTH];
+
+void pypy_debug_traceback_print(void)
+{
+  int i;
+  int skipping;
+  void *my_etype = RPyFetchExceptionType();
+  struct pypydtpos_s *location;
+  void *etype;
+  int has_loc;
+
+  /* This code parses the pypy_debug_tracebacks array.  See example
+     at the start of the file. */
+  fprintf(stderr, "RPython traceback:\n");
+  skipping = 0;
+  i = pypydtcount;
+  while (1)
+    {
+      i = (i - 1) & (PYPY_DEBUG_TRACEBACK_DEPTH-1);
+      if (i == pypydtcount)
+        {
+          fprintf(stderr, "  ...\n");
+          break;
+        }
+
+      location = pypy_debug_tracebacks[i].location;
+      etype    = pypy_debug_tracebacks[i].exctype;
+      has_loc  = location != NULL && location != PYPYDTPOS_RERAISE;
+
+      if (skipping && has_loc && etype == my_etype)
+        skipping = 0;     /* found the matching "f:17, &KeyError */
+
+      if (!skipping)
+        {
+          if (has_loc)
+            fprintf(stderr, "  File \"%s\", line %d, in %s\n",
+                    location->filename, location->lineno, location->funcname);
+          else
+            {
+              /* line "NULL, &KeyError" or "RERAISE, &KeyError" */
+              if (!my_etype)
+                my_etype = etype;
+              if (etype != my_etype)
+                {
+                  fprintf(stderr, "  Note: this traceback is "
+                                  "incomplete or corrupted!\n");
+                  break;
+                }
+              if (location == NULL)  /* found the place that raised the exc */
+                break;
+              skipping = 1;     /* RERAISE: skip until "f:17, &KeyError" */
+            }
+        }
+    }
+}
+
+void pypy_debug_catch_fatal_exception(void)
+{
+  pypy_debug_traceback_print();
+  fprintf(stderr, "Fatal RPython error: %s\n",
+          RPyFetchExceptionType()->ov_name->items);
+  abort();
+}

pypy/translator/c/src/debug_traceback.h

 
 void pypy_debug_traceback_print(void);
 void pypy_debug_catch_fatal_exception(void);
-
-
-/************************************************************/
-
-
-#ifndef PYPY_NOT_MAIN_FILE
-
-int pypydtcount = 0;
-struct pypydtentry_s pypy_debug_tracebacks[PYPY_DEBUG_TRACEBACK_DEPTH];
-
-void pypy_debug_traceback_print(void)
-{
-  int i;
-  int skipping;
-  void *my_etype = RPyFetchExceptionType();
-  struct pypydtpos_s *location;
-  void *etype;
-  int has_loc;
-
-  /* This code parses the pypy_debug_tracebacks array.  See example
-     at the start of the file. */
-  fprintf(stderr, "RPython traceback:\n");
-  skipping = 0;
-  i = pypydtcount;
-  while (1)
-    {
-      i = (i - 1) & (PYPY_DEBUG_TRACEBACK_DEPTH-1);
-      if (i == pypydtcount)
-        {
-          fprintf(stderr, "  ...\n");
-          break;
-        }
-
-      location = pypy_debug_tracebacks[i].location;
-      etype    = pypy_debug_tracebacks[i].exctype;
-      has_loc  = location != NULL && location != PYPYDTPOS_RERAISE;
-
-      if (skipping && has_loc && etype == my_etype)
-        skipping = 0;     /* found the matching "f:17, &KeyError */
-
-      if (!skipping)
-        {
-          if (has_loc)
-            fprintf(stderr, "  File \"%s\", line %d, in %s\n",
-                    location->filename, location->lineno, location->funcname);
-          else
-            {
-              /* line "NULL, &KeyError" or "RERAISE, &KeyError" */
-              if (!my_etype)
-                my_etype = etype;
-              if (etype != my_etype)
-                {
-                  fprintf(stderr, "  Note: this traceback is "
-                                  "incomplete or corrupted!\n");
-                  break;
-                }
-              if (location == NULL)  /* found the place that raised the exc */
-                break;
-              skipping = 1;     /* RERAISE: skip until "f:17, &KeyError" */
-            }
-        }
-    }
-}
-
-void pypy_debug_catch_fatal_exception(void)
-{
-  pypy_debug_traceback_print();
-  fprintf(stderr, "Fatal RPython error: %s\n",
-          RPyFetchExceptionType()->ov_name->items);
-  abort();
-}
-
-#endif /* PYPY_NOT_MAIN_FILE */

pypy/translator/c/src/dtoa.c

 #include <assert.h>
 #include <stdio.h>
 #include <string.h>
-#define PYPY_NOT_MAIN_FILE
 #include "src/asm.h"
 #define PyMem_Malloc malloc
 #define PyMem_Free free

pypy/translator/c/src/entrypoint.c

+#include "common_header.h"
+#ifdef PYPY_STANDALONE
+#include "structdef.h"
+#include "forwarddecl.h"
+#include "preimpl.h"
+#include <src/entrypoint.h>
+#include <src/commondefs.h>
+#include <src/mem.h>
+#include <src/instrument.h>
+#include <src/rtyper.h>
+#include <src/exception.h>
+#include <src/debug_traceback.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef __GNUC__
+/* Hack to prevent this function from being inlined.  Helps asmgcc
+   because the main() function has often a different prologue/epilogue. */
+int pypy_main_function(int argc, char *argv[]) __attribute__((__noinline__));
+#endif
+
+int pypy_main_function(int argc, char *argv[])
+{
+    char *errmsg;
+    int i, exitcode;
+    RPyListOfString *list;
+
+    pypy_asm_stack_bottom();
+#ifdef PYPY_X86_CHECK_SSE2_DEFINED
+    pypy_x86_check_sse2();
+#endif
+    instrument_setup();
+
+#ifndef MS_WINDOWS
+    /* this message does no longer apply to win64 :-) */
+    if (sizeof(void*) != SIZEOF_LONG) {
+        errmsg = "only support platforms where sizeof(void*) == sizeof(long),"
+                 " for now";
+        goto error;
+    }
+#endif
+
+    errmsg = RPython_StartupCode();
+    if (errmsg) goto error;
+
+    list = _RPyListOfString_New(argc);
+    if (RPyExceptionOccurred()) goto memory_out;
+    for (i=0; i<argc; i++) {
+        RPyString *s = RPyString_FromString(argv[i]);
+        if (RPyExceptionOccurred()) goto memory_out;
+        _RPyListOfString_SetItem(list, i, s);
+    }
+
+    exitcode = STANDALONE_ENTRY_POINT(list);
+
+    pypy_debug_alloc_results();
+
+    if (RPyExceptionOccurred()) {
+        /* print the RPython traceback */
+        pypy_debug_catch_fatal_exception();
+    }
+
+    pypy_malloc_counters_results();
+
+    return exitcode;
+
+ memory_out:
+    errmsg = "out of memory";
+ error:
+    fprintf(stderr, "Fatal error during initialization: %s\n", errmsg);
+    abort();
+    return 1;
+}
+
+int PYPY_MAIN_FUNCTION(int argc, char *argv[])
+{
+    return pypy_main_function(argc, argv);
+}
+
+#endif  /* PYPY_STANDALONE */

pypy/translator/c/src/entrypoint.h

+#ifdef PYPY_STANDALONE
+
+#ifndef STANDALONE_ENTRY_POINT
+#  define STANDALONE_ENTRY_POINT   PYPY_STANDALONE
+#endif
+
+#ifndef PYPY_MAIN_FUNCTION
+#define PYPY_MAIN_FUNCTION main
+#endif
+
+char *RPython_StartupCode(void);
+int PYPY_MAIN_FUNCTION(int argc, char *argv[]);
+#endif  /* PYPY_STANDALONE */

pypy/translator/c/src/exception.c

+#include "common_header.h"
+#include "structdef.h"
+#include "forwarddecl.h"
+#include "preimpl.h"
+#include "src/exception.h"
+
+#if defined(PYPY_CPYTHON_EXTENSION)
+   PyObject *RPythonError;
+#endif 
+
+/******************************************************************/
+#ifdef HAVE_RTYPER               /* RPython version of exceptions */
+/******************************************************************/
+
+void RPyDebugReturnShowException(const char *msg, const char *filename,
+                                 long lineno, const char *functionname)
+{
+#ifdef DO_LOG_EXC
+  fprintf(stderr, "%s %s: %s:%ld %s\n", msg,
+          RPyFetchExceptionType()->ov_name->items,
+          filename, lineno, functionname);
+#endif
+}
+
+/* Hint: functions and macros not defined here, like RPyRaiseException,
+   come from exctransformer via the table in extfunc.py. */
+
+#define RPyFetchException(etypevar, evaluevar, type_of_evaluevar) do {  \
+		etypevar = RPyFetchExceptionType();			\
+		evaluevar = (type_of_evaluevar)RPyFetchExceptionValue(); \
+		RPyClearException();					\
+	} while (0)
+
+/* implementations */
+
+void _RPyRaiseSimpleException(RPYTHON_EXCEPTION rexc)
+{
+	/* XXX msg is ignored */
+	RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(rexc), rexc);
+}
+
+
+/******************************************************************/
+#endif                                             /* HAVE_RTYPER */
+/******************************************************************/

pypy/translator/c/src/exception.h

 
 /************************************************************/
- /***  C header subsection: exceptions                     ***/
+/***  C header subsection: exceptions                     ***/
 
 #ifdef HAVE_RTYPER // shrug, hopefully dies with PYPY_NOT_MAIN_FILE
 
     ? (RPyDebugReturnShowException(msg, __FILE__, __LINE__, __FUNCTION__), 1) \
     : 0                                                                 \
   )
+#endif
+/* !DO_LOG_EXC: define the function anyway, so that we can shut
+   off the prints of a debug_exc by remaking only testing_1.o */
 void RPyDebugReturnShowException(const char *msg, const char *filename,
                                  long lineno, const char *functionname);
-#ifndef PYPY_NOT_MAIN_FILE
-void RPyDebugReturnShowException(const char *msg, const char *filename,
-                                 long lineno, const char *functionname)