/////////////// CModulePreamble ///////////////

// For use in DL_IMPORT/DL_EXPORT macros.
// CPython has required PY_LONG_LONG support for years, even if HAVE_LONG_LONG is not defined for us.
// Calling _PyType_Lookup() does not appear to be safe in Py<=2.6/3.1.
// Python 3.11a2 hid _PyLong_FormatAdvancedWriter and _PyFloat_FormatAdvancedWriter,
// therefore disable the unicode writer until a better alternative appears.
// Python 3.11 deleted the localsplus argument from the frame object, which is used in our
// fast_pycall code.
/* These short defines can easily conflict with other code */
/* Compile-time sanity check that these are indeed equal.  GitHub issue #2670. */
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
// restrict
// unused attribute
template<class T> void CYTHON_MAYBE_UNUSED_VAR(const T&) { }
#ifdef _MSC_VER
    // MSVC lacks <stdint.h> in old versions; map the fixed-width types to MSVC builtins.
    // (The preprocessor guards here were lost in extraction and are restored
    // following the usual pattern for this fallback.)
    #ifndef _MSC_STDINT_H_
        #if _MSC_VER < 1300
            typedef unsigned char     uint8_t;
            typedef unsigned int      uint32_t;
        #else
            typedef unsigned __int8   uint8_t;
            typedef unsigned __int32  uint32_t;
        #endif
    #endif
#endif
/////////////// CInitCode ///////////////

// inline attribute

/////////////// CppInitCode ///////////////

// inline attribute

// Work around clang bug http://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor
template<typename T>
void __Pyx_call_destructor(T& x) {
    x.~T();
}

// Used for temporary variables of "reference" type.
template<typename T>
class __Pyx_FakeReference {
  public:
    __Pyx_FakeReference() : ptr(NULL) { }
    // __Pyx_FakeReference(T& ref) : ptr(&ref) { }
    // Const version needed as Cython doesn't know about const overloads (e.g. for stl containers).
    __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
    T *operator->() { return ptr; }
    T *operator&() { return ptr; }
    operator T&() { return *ptr; }
    // TODO(robertwb): Delegate all operators (or auto-generate unwrapping code where needed).
    template<typename U> bool operator ==(U other) { return *ptr == other; }
    template<typename U> bool operator !=(U other) { return *ptr != other; }
  private:
    T *ptr;
};
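/* Usage sketch (illustrative only, not part of the original utility code;
   the names are hypothetical): how a __Pyx_FakeReference behaves like the
   T& it stands in for. */
static void __pyx_example_fake_reference(void) {
    int x = 42;
    __Pyx_FakeReference<int> ref(x);  // binds via the const overload above
    int y = ref;                      // operator T&() unwraps to *ptr
    bool same = (ref == y);           // delegated operator== compares *ptr
    (void)same;
}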
/////////////// PythonCompatibility ///////////////

static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f,
                                                    PyObject *code, PyObject *c, PyObject* n, PyObject *v,
                                                    PyObject *fv, PyObject *cell, PyObject* fn,
                                                    PyObject *name, int fline, PyObject *lnos) {
    // TODO - currently written to be simple and work in limited API etc.
    // A more optimized version would be good.
    PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL;
    PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL;
    const char *fn_cstr=NULL;
    const char *name_cstr=NULL;
    PyCodeObject* co=NULL;
    PyObject *type, *value, *traceback;

    // we must be able to call this while an exception is happening - thus clear then restore the state
    PyErr_Fetch(&type, &value, &traceback);

    if (!(kwds=PyDict_New())) goto end;
    if (!(argcount=PyLong_FromLong(a))) goto end;
    if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end;
    if (!(posonlyargcount=PyLong_FromLong(0))) goto end;
    if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end;
    if (!(kwonlyargcount=PyLong_FromLong(k))) goto end;
    if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end;
    if (!(nlocals=PyLong_FromLong(l))) goto end;
    if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end;
    if (!(stacksize=PyLong_FromLong(s))) goto end;
    if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end;
    if (!(flags=PyLong_FromLong(f))) goto end;
    if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end;
    if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end;
    if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end;
    if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end;
    if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end;
    if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end;
    if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end;
    if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end;

    if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end;
    if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end;
    if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end;

    if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too;
    if (!(empty = PyTuple_New(0))) goto cleanup_code_too;  // unfortunately __pyx_empty_tuple isn't available here
    if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too;

    Py_XDECREF((PyObject*)co);
    co = (PyCodeObject*)call_result;
    call_result = NULL;

    if (0) {
    cleanup_code_too:
        Py_XDECREF((PyObject*)co);
        co = NULL;
    }
  end:
    Py_XDECREF(kwds);
    Py_XDECREF(argcount);
    Py_XDECREF(posonlyargcount);
    Py_XDECREF(kwonlyargcount);
    Py_XDECREF(nlocals);
    Py_XDECREF(stacksize);
    Py_XDECREF(flags);  // was missing from the cleanup list and leaked
    Py_XDECREF(replace);
    Py_XDECREF(call_result);
    Py_XDECREF(empty);
    if (type) {
        PyErr_Restore(type, value, traceback);
    }
    return co;
}
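/* Python-level equivalent of the helper above (hedged sketch; the keyword set
   mirrors the PyDict_SetItemString() calls):
       co = PyCode_NewEmpty(fn, name, fline)   # then, in Python terms:
       co = co.replace(co_argcount=a, co_posonlyargcount=0, co_kwonlyargcount=k,
                       co_nlocals=l, co_stacksize=s, co_flags=f, co_code=code,
                       co_consts=c, co_names=n, co_varnames=v, co_freevars=fv,
                       co_cellvars=cell, co_linetable=lnos)
*/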
// already defined for Stackless Python (all versions) and CPython >= 3.7
// value if defined: 0x80 for Stackless Python < 3.6, else 0x100

// new in CPython 3.6, but changed in 3.7 - see
// positional-only parameters:
//   https://bugs.python.org/issue29464
// const args:
//   https://bugs.python.org/issue32240
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
// new in CPython 3.7; in 3.6 this was the signature of _PyCFunctionFast()
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
                                                        Py_ssize_t nargs, PyObject *kwnames);
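/* Illustrative sketch (not part of the original utility code; the function
   name is hypothetical): a METH_FASTCALL-style function matching the
   __Pyx_PyCFunctionFast signature above, summing its integer arguments. */
static PyObject *__pyx_example_sum_fastcall(PyObject *self, PyObject *const *args, Py_ssize_t nargs) {
    Py_ssize_t i;
    long total = 0;
    (void)self;  /* unused, as in module-level functions */
    for (i = 0; i < nargs; i++) {
        long v = PyLong_AsLong(args[i]);
        if (v == -1 && PyErr_Occurred()) return NULL;
        total += v;
    }
    return PyLong_FromLong(total);
}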
// special C-API functions only in Pyston
//#elif PY_VERSION_HEX >= 0x03050200
//    Actually added in 3.5.2, but compiling against that does not guarantee that we get imported there.

// TSS (Thread Specific Storage) API
// The guard and the Py_tss_NEEDS_INIT definition below restore what this
// backport needs to compile; they follow the upstream pattern for pre-3.7 CPython.
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
    *key = PyThread_create_key();
    return 0;  /* PyThread_create_key reports success always */
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
    Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
    if (key != NULL)  // guard against allocation failure before initialising
        *key = Py_tss_NEEDS_INIT;
    return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
    PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
    return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
    PyThread_delete_key(*key);
    *key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
    return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
    return PyThread_get_key_value(*key);
}
// PyThread_delete_key_value(key) is equivalent to PyThread_set_key_value(key, NULL)
// PyThread_ReInitTLS() is a no-op
#endif  // TSS backport
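/* Usage sketch (illustrative; the key and function names are hypothetical):
   the same code compiles against this shim on Py<3.7 and against the native
   TSS API on Py3.7+. */
static Py_tss_t __pyx_example_tss_key = Py_tss_NEEDS_INIT;
static void *__pyx_example_tss_roundtrip(void *value) {
    if (!PyThread_tss_is_created(&__pyx_example_tss_key) &&
            PyThread_tss_create(&__pyx_example_tss_key) != 0)
        return NULL;
    if (PyThread_tss_set(&__pyx_example_tss_key, value) != 0)
        return NULL;
    return PyThread_tss_get(&__pyx_example_tss_key);  /* returns 'value' back */
}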
/* new Py3.3 unicode type (PEP 393) */
// Py3.12 / PEP 623 will remove wstr-type unicode strings and all of the PyUnicode_READY() machinery.
// Avoid calling deprecated C-API functions in Py3.9+ that PEP 623 schedules for removal in Py3.12.
// https://www.python.org/dev/peps/pep-0623/

/* (void)(k) => avoid unused variable warning due to macro: */
// ("..." % x) must call PyNumber_Remainder() if x is a string subclass that implements "__rmod__()".
// PyPy3 used to define "PyObject_Unicode"
// NOTE: might fail with an exception => check for -1

typedef long Py_hash_t;

// backport of PyAsyncMethods from Py3.5 to older Py3.x versions
// (mis-)using the "tp_reserved" type slot which is re-activated as "tp_as_async" in Py3.5
typedef struct {
    unaryfunc am_await;
    unaryfunc am_aiter;
    unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
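/* Wiring sketch for the backport above (hedged): before Py3.5 the async slots
   live in the re-used "tp_reserved" slot, so fetching am_await amounts to
       __Pyx_PyAsyncMethodsStruct *am = (__Pyx_PyAsyncMethodsStruct*) tp->tp_reserved;
       unaryfunc await_fn = am ? am->am_await : NULL;
   while Py3.5+ reads the same struct layout through tp_as_async. */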
/////////////// SmallCodeConfig.proto ///////////////

/////////////// PyModInitFuncType.proto ///////////////

// Py2: define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
// Py3+: define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
/////////////// FastTypeChecks.proto ///////////////

static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); /*proto*/
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); /*proto*/
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); /*proto*/

/////////////// FastTypeChecks ///////////////
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::PyErrFetchRestore

static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
    while (a) {
        a = a->tp_base;
        if (a == b)
            return 1;
    }
    return b == &PyBaseObject_Type;
}

static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
    PyObject *mro;
    if (a == b) return 1;
    mro = a->tp_mro;
    if (likely(mro)) {
        Py_ssize_t i, n;
        n = PyTuple_GET_SIZE(mro);
        for (i = 0; i < n; i++) {
            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
                return 1;
        }
        return 0;
    }
    // should only get here for incompletely initialised types, i.e. never under normal usage patterns
    return __Pyx_InBases(a, b);
}
// The two definitions below are the Py2 and Py3 variants of the same helper;
// the preprocessor guards separating them were lost in extraction and are
// restored here following the upstream pattern.
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
    // PyObject_IsSubclass() can recurse and therefore is not safe
    PyObject *exception, *value, *tb;
    int res;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&exception, &value, &tb);
    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
    // This function must not fail, so print the error here (which also clears it)
    if (unlikely(res == -1)) {
        PyErr_WriteUnraisable(err);
        res = 0;
    }
    if (!res) {
        res = PyObject_IsSubclass(err, exc_type2);
        // This function must not fail, so print the error here (which also clears it)
        if (unlikely(res == -1)) {
            PyErr_WriteUnraisable(err);
            res = 0;
        }
    }
    __Pyx_ErrRestore(exception, value, tb);
    return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
    int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
    if (!res) {
        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
    }
    return res;
}
#endif
// so far, we only call PyErr_GivenExceptionMatches() with an exception type (not instance) as first argument
// => optimise for that case

static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    assert(PyExceptionClass_Check(exc_type));
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    // the tighter subtype checking in Py3 allows faster out-of-order comparison
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        PyObject *t = PyTuple_GET_ITEM(tuple, i);
        #if PY_MAJOR_VERSION < 3
        if (likely(exc_type == t)) return 1;
        #endif
        if (likely(PyExceptionClass_Check(t))) {
            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
        } else {
            // FIXME: Py3: PyErr_SetString(PyExc_TypeError, "catching classes that do not inherit from BaseException is not allowed");
        }
    }
    return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
    if (likely(err == exc_type)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        if (likely(PyExceptionClass_Check(exc_type))) {
            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
        } else if (likely(PyTuple_Check(exc_type))) {
            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
        } else {
            // FIXME: Py3: PyErr_SetString(PyExc_TypeError, "catching classes that do not inherit from BaseException is not allowed");
        }
    }
    return PyErr_GivenExceptionMatches(err, exc_type);
}

static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
    // Only used internally with known exception types => pure safety check assertions.
    assert(PyExceptionClass_Check(exc_type1));
    assert(PyExceptionClass_Check(exc_type2));
    if (likely(err == exc_type1 || err == exc_type2)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
    }
    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
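/* Usage sketch (illustrative; the helper name is hypothetical): the kind of
   "except" test these helpers accelerate, roughly equivalent to two calls to
   PyErr_GivenExceptionMatches() but with a single MRO walk per type. */
static int __pyx_example_catches_lookup_error(PyObject *exc_type) {
    return __Pyx_PyErr_GivenExceptionMatches2(exc_type, PyExc_KeyError, PyExc_IndexError);
}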
/////////////// MathInitCode ///////////////

static CYTHON_INLINE float __PYX_NAN() {
    // Initialize NaN.  The sign is irrelevant, an exponent with all bits 1 and
    // a nonzero mantissa means NaN.  If the first bit in the mantissa is 1, it is
    // a quiet NaN.
    float value;
    memset(&value, 0xFF, sizeof(value));
    return value;
}
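/* Sanity sketch (illustrative): 0xFF in every byte gives an all-ones exponent
   and a nonzero mantissa with the leading (quiet) bit set, i.e. a negative
   quiet NaN.  The classic self-comparison test confirms it: */
static int __pyx_example_is_nan(void) {
    float value = __PYX_NAN();
    return value != value;  /* true: NaN is the only value unequal to itself */
}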
/////////////// UtilityFunctionPredeclarations.proto ///////////////

typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/

/////////////// ForceInitThreads.proto ///////////////
//@proto_block: utility_code_proto_before_types

/////////////// InitThreads.init ///////////////

PyEval_InitThreads();

/////////////// ModuleCreationPEP489 ///////////////
//@substitute: naming

//#if CYTHON_PEP489_MULTI_PHASE_INIT
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
    // The version guards separating the two detection strategies were lost in
    // extraction; restored here (PyInterpreterState_GetID() is new in 3.7).
    #if PY_VERSION_HEX >= 0x030700A1
    static PY_INT64_T main_interpreter_id = -1;
    PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
    if (main_interpreter_id == -1) {
        main_interpreter_id = current_id;
        return (unlikely(current_id == -1)) ? -1 : 0;
    } else if (unlikely(main_interpreter_id != current_id))
    #else
    static PyInterpreterState *main_interpreter = NULL;
    PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
    if (!main_interpreter) {
        main_interpreter = current_interpreter;
    } else if (unlikely(main_interpreter != current_interpreter))
    #endif
    {
        PyErr_SetString(
            PyExc_ImportError,
            "Interpreter change detected - this module can only be loaded into one interpreter per process.");
        return -1;
    }
    return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
    PyObject *value = PyObject_GetAttrString(spec, from_name);
    int result = 0;
    if (likely(value)) {
        if (allow_none || value != Py_None) {
            result = PyDict_SetItemString(moddict, to_name, value);
        }
        Py_DECREF(value);
    } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Clear();
    } else {
        result = -1;
    }
    return result;
}

static CYTHON_SMALL_CODE PyObject* ${pymodule_create_func_cname}(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
    PyObject *module = NULL, *moddict, *modname;

    // For now, we only have exactly one module instance.
    if (__Pyx_check_single_interpreter())
        return NULL;
    if (${module_cname})
        return __Pyx_NewRef(${module_cname});

    modname = PyObject_GetAttrString(spec, "name");
    if (unlikely(!modname)) goto bad;

    module = PyModule_NewObject(modname);
    Py_DECREF(modname);
    if (unlikely(!module)) goto bad;

    moddict = PyModule_GetDict(module);
    if (unlikely(!moddict)) goto bad;
    // moddict is a borrowed reference

    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;

    return module;
bad:
    Py_XDECREF(module);
    return NULL;
}
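/* Python-level equivalent of the create function above (hedged sketch):
       module = types.ModuleType(spec.name)
       module.__loader__  = spec.loader            # copied even if None
       module.__file__    = spec.origin            # copied even if None
       module.__package__ = spec.parent            # copied even if None
       if spec.submodule_search_locations is not None:
           module.__path__ = spec.submodule_search_locations
*/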
//#endif

/////////////// CodeObjectCache.proto ///////////////

typedef struct {
    PyCodeObject* code_object;
    int code_line;
} __Pyx_CodeObjectCacheEntry;

struct __Pyx_CodeObjectCache {
    int count;
    int max_count;
    __Pyx_CodeObjectCacheEntry* entries;
};

static struct __Pyx_CodeObjectCache __pyx_code_cache = {0, 0, NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);

/////////////// CodeObjectCache ///////////////
// Note that errors are simply ignored in the code below.
// This is just a cache, if a lookup or insertion fails - so what?
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}

static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}

static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_INCREF(code_object);  // the cache owns a reference in all paths
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
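/* Usage sketch (illustrative; names hypothetical): how traceback code consults
   the cache, matching the ownership rules above (find returns a new reference,
   insert takes its own reference). */
static PyCodeObject *__pyx_example_get_code(int code_line, PyCodeObject *freshly_built) {
    PyCodeObject *co = __pyx_find_code_object(code_line);
    if (!co) {
        __pyx_insert_code_object(code_line, freshly_built);
        Py_INCREF(freshly_built);  /* return a new reference, like the found path */
        co = freshly_built;
    }
    return co;
}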
/////////////// CodeObjectCache.cleanup ///////////////

if (__pyx_code_cache.entries) {
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    int i, count = __pyx_code_cache.count;
    __pyx_code_cache.count = 0;
    __pyx_code_cache.max_count = 0;
    __pyx_code_cache.entries = NULL;
    for (i=0; i<count; i++) {
        Py_DECREF(entries[i].code_object);
    }
    PyMem_Free(entries);
}
/////////////// CheckBinaryVersion.proto ///////////////

static int __Pyx_check_binary_version(void);

/////////////// CheckBinaryVersion ///////////////
static int __Pyx_check_binary_version(void) {
    // The original 4-byte buffers truncated two-digit minor versions ("3.10"),
    // and comparing only ctversion[2] misfired for Python >= 3.10.
    char ctversion[8];
    const char *rtversion = Py_GetVersion();
    size_t ctlen;
    PyOS_snprintf(ctversion, sizeof(ctversion), "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    ctlen = strlen(ctversion);
    // Match the full "major.minor" prefix, and make sure the runtime minor
    // version does not merely start with ours (e.g. "3.1" vs. "3.10").
    if (strncmp(ctversion, rtversion, ctlen) != 0 ||
            (rtversion[ctlen] >= '0' && rtversion[ctlen] <= '9')) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %.*s",
                      ctversion, __Pyx_MODULE_NAME,
                      (int)strcspn(rtversion, " \n"), rtversion);
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}
/////////////// IsLittleEndian.proto ///////////////

static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);

/////////////// IsLittleEndian ///////////////

static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    union {
        uint32_t u32;
        uint8_t u8[4];
    } S;
    S.u32 = 0x01020304;
    return S.u8[0] == 4;
}
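/* Why u8[0] == 4 means little-endian: the constant 0x01020304 is stored in
   memory as the byte sequence 04 03 02 01 on a little-endian machine (least
   significant byte first) and as 01 02 03 04 on a big-endian one. */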
/////////////// Refnanny.proto ///////////////

typedef struct {
    void (*INCREF)(void*, PyObject*, int);
    void (*DECREF)(void*, PyObject*, int);
    void (*GOTREF)(void*, PyObject*, int);
    void (*GIVEREF)(void*, PyObject*, int);
    void* (*SetupContext)(const char*, int, const char*);
    void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;

static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/

/////////////// Refnanny ///////////////

static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    PyObject *m = NULL, *p = NULL;
    void *r = NULL;
    m = PyImport_ImportModule(modname);
    if (!m) goto end;
    p = PyObject_GetAttrString(m, "RefNannyAPI");
    if (!p) goto end;
    r = PyLong_AsVoidPtr(p);
end:
    Py_XDECREF(p);
    Py_XDECREF(m);
    return (__Pyx_RefNannyAPIStruct *)r;
}

/////////////// ImportRefnannyAPI ///////////////

__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
    PyErr_Clear();
    __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
    if (!__Pyx_RefNanny)
        Py_FatalError("failed to import 'refnanny' module");
}
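/* Usage sketch (hedged; this is roughly what the refnanny macros expand to,
   spelled out against the struct above):
       void *ctx = __Pyx_RefNanny->SetupContext("some_function", __LINE__, __FILE__);
       __Pyx_RefNanny->INCREF(ctx, obj, __LINE__);   // record and incref
       __Pyx_RefNanny->GIVEREF(ctx, obj, __LINE__);  // hand ownership away
       __Pyx_RefNanny->FinishContext(&ctx);          // report dangling references
*/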
/////////////// RegisterModuleCleanup.proto ///////////////
//@substitute: naming

static void ${cleanup_cname}(PyObject *self); /*proto*/
static int __Pyx_RegisterCleanup(void); /*proto*/

/////////////// RegisterModuleCleanup ///////////////
//@substitute: naming

static PyObject* ${cleanup_cname}_atexit(PyObject *module, CYTHON_UNUSED PyObject *unused) {
    ${cleanup_cname}(module);
    Py_INCREF(Py_None); return Py_None;
}

static int __Pyx_RegisterCleanup(void) {
    // Don't use Py_AtExit because that has a 32-call limit and is called
    // after Python finalization.
    // Also, we try to prepend the cleanup function to "atexit._exithandlers"
    // in Py2 because CPython runs them last-to-first.  Being run last allows
    // user exit code that may depend on the globals and cached objects we are
    // about to clean up to run before us.
    static PyMethodDef cleanup_def = {
        "__cleanup", (PyCFunction)${cleanup_cname}_atexit, METH_NOARGS, 0};

    PyObject *cleanup_func = 0;
    PyObject *atexit = 0;
    PyObject *reg = 0;
    PyObject *args = 0;
    PyObject *res = 0;
    int ret = -1;

    cleanup_func = PyCFunction_New(&cleanup_def, 0);
    if (!cleanup_func)
        goto bad;

    atexit = PyImport_ImportModule("atexit");
    if (!atexit)
        goto bad;
    reg = PyObject_GetAttrString(atexit, "_exithandlers");
    if (reg && PyList_Check(reg)) {
        PyObject *a, *kw;
        a = PyTuple_New(0);
        kw = PyDict_New();
        if (!a || !kw) {
            Py_XDECREF(a);
            Py_XDECREF(kw);
            goto bad;
        }
        args = PyTuple_Pack(3, cleanup_func, a, kw);
        Py_DECREF(a);
        Py_DECREF(kw);
        if (!args)
            goto bad;
        ret = PyList_Insert(reg, 0, args);
    } else {
        if (!reg)
            PyErr_Clear();
        Py_XDECREF(reg);
        reg = PyObject_GetAttrString(atexit, "register");
        if (!reg)
            goto bad;
        args = PyTuple_Pack(1, cleanup_func);
        if (!args)
            goto bad;
        res = PyObject_CallObject(reg, args);
        if (!res)
            goto bad;
        ret = 0;
    }
bad:
    Py_XDECREF(cleanup_func);
    Py_XDECREF(atexit);
    Py_XDECREF(reg);
    Py_XDECREF(args);
    Py_XDECREF(res);
    return ret;
}
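/* Python-level equivalent of the registration above (hedged sketch):
       import atexit
       try:
           # Py2: prepend, so we run *after* user handlers (they run last-to-first)
           atexit._exithandlers.insert(0, (cleanup, (), {}))
       except AttributeError:
           atexit.register(cleanup)   # Py3: no ordering control
*/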
/////////////// FastGil.init ///////////////

__Pyx_FastGilFuncInit();

/////////////// NoFastGil.proto ///////////////
//@proto_block: utility_code_proto_before_types

/////////////// FastGil.proto ///////////////
//@proto_block: utility_code_proto_before_types

struct __Pyx_FastGilVtab {
    PyGILState_STATE (*Fast_PyGILState_Ensure)(void);
    void (*Fast_PyGILState_Release)(PyGILState_STATE oldstate);
    void (*FastGIL_Remember)(void);
    void (*FastGIL_Forget)(void);
};

static void __Pyx_FastGIL_Noop(void) {}
static struct __Pyx_FastGilVtab __Pyx_FastGilFuncs = {
    PyGILState_Ensure,
    PyGILState_Release,
    __Pyx_FastGIL_Noop,
    __Pyx_FastGIL_Noop
};

static void __Pyx_FastGilFuncInit(void);
/////////////// FastGil ///////////////
//@requires: CommonStructures.c::FetchCommonPointer

// The implementations of PyGILState_Ensure/Release call PyThread_get_key_value
// several times, which turns out to be quite slow (slower, in fact, than
// acquiring the GIL itself).  Simply storing it in a thread local for the
// common case is much faster.
// To make optimal use of this thread local, we attempt to share it between
// modules.
static CYTHON_THREAD_LOCAL PyThreadState *__Pyx_FastGil_tcur = NULL;
static CYTHON_THREAD_LOCAL int __Pyx_FastGil_tcur_depth = 0;
static int __Pyx_FastGil_autoTLSkey = -1;

static CYTHON_INLINE void __Pyx_FastGIL_Remember0(void) {
    ++__Pyx_FastGil_tcur_depth;
}

static CYTHON_INLINE void __Pyx_FastGIL_Forget0(void) {
    if (--__Pyx_FastGil_tcur_depth == 0) {
        __Pyx_FastGil_tcur = NULL;
    }
}

static CYTHON_INLINE PyThreadState *__Pyx_FastGil_get_tcur(void) {
    PyThreadState *tcur = __Pyx_FastGil_tcur;
    if (tcur == NULL) {
        tcur = __Pyx_FastGil_tcur = (PyThreadState*)PyThread_get_key_value(__Pyx_FastGil_autoTLSkey);
    }
    return tcur;
}

static PyGILState_STATE __Pyx_FastGil_PyGILState_Ensure(void) {
    int current;
    PyThreadState *tcur;
    __Pyx_FastGIL_Remember0();
    tcur = __Pyx_FastGil_get_tcur();
    if (tcur == NULL) {
        // Uninitialized, need to initialize now.
        return PyGILState_Ensure();
    }
    current = tcur == __Pyx_PyThreadState_Current;
    if (current == 0) {
        PyEval_RestoreThread(tcur);
    }
    ++tcur->gilstate_counter;
    return current ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}

static void __Pyx_FastGil_PyGILState_Release(PyGILState_STATE oldstate) {
    PyThreadState *tcur = __Pyx_FastGil_get_tcur();
    __Pyx_FastGIL_Forget0();
    if (tcur->gilstate_counter == 1) {
        // This is the last lock, do all the cleanup as well.
        PyGILState_Release(oldstate);
    } else {
        --tcur->gilstate_counter;
        if (oldstate == PyGILState_UNLOCKED) {
            PyEval_SaveThread();
        }
    }
}
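/* Fast-path sketch (illustrative): within one thread, nested Ensure/Release
   pairs after the first are served from the cached thread state:
       PyGILState_STATE s1 = __Pyx_FastGil_PyGILState_Ensure();  // TLS lookup, caches tcur
       PyGILState_STATE s2 = __Pyx_FastGil_PyGILState_Ensure();  // hits __Pyx_FastGil_tcur
       __Pyx_FastGil_PyGILState_Release(s2);
       __Pyx_FastGil_PyGILState_Release(s1);  // depth reaches 0, cache is cleared
*/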
// The two definitions of __Pyx_FastGilFuncInit0 below are pre-/post-TSS-API
// variants; the preprocessor guards separating them were lost in extraction
// and are restored on the assumption that the legacy TLS-key probe predates
// the Py3.7 TSS API.
#if PY_VERSION_HEX < 0x030700A2
static void __Pyx_FastGilFuncInit0(void) {
    /* Try to detect autoTLSkey. */
    int key;
    void* this_thread_state = (void*) PyGILState_GetThisThreadState();
    for (key = 0; key < 100; key++) {
        if (PyThread_get_key_value(key) == this_thread_state) {
            __Pyx_FastGil_autoTLSkey = key;
            break;
        }
    }
    if (__Pyx_FastGil_autoTLSkey != -1) {
        PyObject* capsule = NULL;
        PyObject* abi_module = NULL;
        __Pyx_PyGILState_Ensure = __Pyx_FastGil_PyGILState_Ensure;
        __Pyx_PyGILState_Release = __Pyx_FastGil_PyGILState_Release;
        __Pyx_FastGIL_Remember = __Pyx_FastGIL_Remember0;
        __Pyx_FastGIL_Forget = __Pyx_FastGIL_Forget0;
        capsule = PyCapsule_New(&__Pyx_FastGilFuncs, __Pyx_FastGIL_PyCapsule, NULL);
        abi_module = PyImport_AddModule(__Pyx_FastGIL_ABI_module);
        if (capsule && abi_module) {
            PyObject_SetAttrString(abi_module, __Pyx_FastGIL_PyCapsuleName, capsule);
        }
        Py_XDECREF(capsule);
    }
}
#else
static void __Pyx_FastGilFuncInit0(void) {
    CYTHON_UNUSED void* force_use = (void*)&__Pyx_FetchCommonPointer;
}
#endif

static void __Pyx_FastGilFuncInit(void) {
#if PY_VERSION_HEX >= 0x02070000
    // PyCapsule needs Py2.7+, so first try to import a vtable shared by another module.
    struct __Pyx_FastGilVtab* shared = (struct __Pyx_FastGilVtab*)PyCapsule_Import(__Pyx_FastGIL_PyCapsule, 1);
#else
    struct __Pyx_FastGilVtab* shared = NULL;
#endif
    if (shared) {
        __Pyx_FastGilFuncs = *shared;
    } else {
        PyErr_Clear();
        __Pyx_FastGilFuncInit0();
    }
}