Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions src/picologging/handler.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -53,12 +53,18 @@ PyObject* Handler_handle(Handler *self, PyObject *record) {
if (Filterer_filter(&self->filterer, (PyObject*)record) != Py_True)
Py_RETURN_NONE;

// Release GIL before acquiring C++ lock to prevent deadlock
PyThreadState *_save = PyEval_SaveThread();
try {
self->lock->lock();
} catch (const std::exception& e) {
PyEval_RestoreThread(_save);
PyErr_Format(PyExc_RuntimeError, "Cannot acquire thread lock, %s", e.what());
return nullptr;
}
// Reacquire GIL after acquiring C++ lock
PyEval_RestoreThread(_save);

PyObject* result = nullptr;
if (StreamHandler_CheckExact(((PyObject*)self))){
PyObject* args[1] = {record};
Expand All @@ -67,7 +73,11 @@ PyObject* Handler_handle(Handler *self, PyObject *record) {
result = PyObject_CallMethod_ONEARG((PyObject*)self, self->_const_emit, record);
}

// Release GIL before releasing C++ lock
_save = PyEval_SaveThread();
self->lock->unlock();
PyEval_RestoreThread(_save);

return result == nullptr ? nullptr : Py_True;
}

Expand Down Expand Up @@ -108,12 +118,16 @@ PyObject* Handler_setFormatter(Handler *self, PyObject *formatter) {
}

/**
 * Acquire the handler's lock (exposed to Python as Handler.acquire()).
 *
 * The GIL is released before taking the C++ lock so that a thread
 * currently holding the handler lock can still run Python code and
 * eventually release it; holding both simultaneously can deadlock.
 *
 * Returns None on success, or nullptr with RuntimeError set if the
 * lock could not be acquired (mirrors the handling in Handler_handle,
 * where lock() is likewise treated as throwing).
 */
PyObject* Handler_acquire(Handler *self){
    PyThreadState *_save = PyEval_SaveThread();
    try {
        self->lock->lock();
    } catch (const std::exception& e) {
        // Reacquire the GIL before touching any CPython error APIs.
        PyEval_RestoreThread(_save);
        PyErr_Format(PyExc_RuntimeError, "Cannot acquire thread lock, %s", e.what());
        return nullptr;
    }
    // Reacquire the GIL now that the C++ lock is held.
    PyEval_RestoreThread(_save);
    Py_RETURN_NONE;
}

/**
 * Release the handler's lock (exposed to Python as Handler.release()).
 *
 * The GIL is dropped around the C++ unlock so that a (potentially)
 * blocking unlock cannot stall other Python threads; it is reacquired
 * before control returns to the interpreter.
 */
PyObject* Handler_release(Handler *self){
    PyThreadState *threadState = PyEval_SaveThread();
    self->lock->unlock();
    PyEval_RestoreThread(threadState);
    Py_RETURN_NONE;
}

Expand Down
47 changes: 31 additions & 16 deletions src/picologging/logger.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,7 @@ LogRecord* Logger_logMessageAsRecord(Logger* self, unsigned short level, PyObjec
long lineno = f != nullptr ? PyFrame_GETLINENO(f) : 0;
PyObject *co_name = f != nullptr ? PyFrame_GETCODE(f)->co_name : self->_const_unknown;

PyObject* processed_stack_info = nullptr;
if (stack_info == Py_True){
PyObject* mod = PICOLOGGING_MODULE(); // borrowed reference
PyObject* modDict = PyModule_GetDict(mod); // borrowed reference
Expand All @@ -210,6 +211,7 @@ LogRecord* Logger_logMessageAsRecord(Logger* self, unsigned short level, PyObjec
NULL);
if (printStackResult == nullptr)
{
Py_XDECREF(sio);
Py_XDECREF(sio_cls);
Py_XDECREF(print_stack);
return nullptr; // Got exception in print_stack()
Expand All @@ -232,7 +234,7 @@ LogRecord* Logger_logMessageAsRecord(Logger* self, unsigned short level, PyObjec
Py_DECREF(s);
s = s2;
}
stack_info = s;
processed_stack_info = s;
}

LogRecord* record = (LogRecord*) (&LogRecordType)->tp_alloc(&LogRecordType, 0);
Expand All @@ -242,7 +244,7 @@ LogRecord* Logger_logMessageAsRecord(Logger* self, unsigned short level, PyObjec
return nullptr;
}

return LogRecord_create(
LogRecord* result = LogRecord_create(
record,
self->name,
msg,
Expand All @@ -252,8 +254,13 @@ LogRecord* Logger_logMessageAsRecord(Logger* self, unsigned short level, PyObjec
lineno,
exc_info,
co_name,
stack_info
processed_stack_info ? processed_stack_info : stack_info
);

// Clean up the processed stack info if it was created
Py_XDECREF(processed_stack_info);

return result;
}

inline PyObject* PyArg_GetKeyword(PyObject *const *args, Py_ssize_t npargs, PyObject *kwnames, PyObject* keyword){
Expand Down Expand Up @@ -281,34 +288,42 @@ PyObject* Logger_logAndHandle(Logger *self, PyObject *const *args, Py_ssize_t nf
PyTuple_SET_ITEM(args_, i - 1, args[i]);
Py_INCREF(args[i]);
}
PyObject* exc_info = kwnames != nullptr ? PyArg_GetKeyword(args, npargs, kwnames, self->_const_exc_info) : nullptr;
if (exc_info == nullptr){
PyObject* exc_info_kw = kwnames != nullptr ? PyArg_GetKeyword(args, npargs, kwnames, self->_const_exc_info) : nullptr;
PyObject* exc_info;
if (exc_info_kw == nullptr){
exc_info = Py_NewRef(Py_None);
} else {
if (PyExceptionInstance_Check(exc_info)){
if (PyExceptionInstance_Check(exc_info_kw)){
PyObject * unpackedExcInfo = PyTuple_New(3);
PyObject * excType = (PyObject*)Py_TYPE(exc_info);
PyObject * excType = (PyObject*)Py_TYPE(exc_info_kw);
PyTuple_SET_ITEM(unpackedExcInfo, 0, excType);
Py_INCREF(excType);
PyTuple_SET_ITEM(unpackedExcInfo, 1, exc_info);
Py_INCREF(exc_info);
PyObject* traceback = PyObject_GetAttrString(exc_info, "__traceback__");
PyTuple_SET_ITEM(unpackedExcInfo, 1, exc_info_kw);
Py_INCREF(exc_info_kw);
PyObject* traceback = PyObject_GetAttrString(exc_info_kw, "__traceback__");
PyTuple_SET_ITEM(unpackedExcInfo, 2, traceback);
Py_INCREF(traceback);
exc_info = unpackedExcInfo;
} else if (!PyTuple_CheckExact(exc_info)){ // Probably Py_TRUE, fetch current exception as tuple
} else if (!PyTuple_CheckExact(exc_info_kw)){ // Probably Py_TRUE, fetch current exception as tuple
PyObject * unpackedExcInfo = PyTuple_New(3);
PyErr_GetExcInfo(&PyTuple_GET_ITEM(unpackedExcInfo, 0), &PyTuple_GET_ITEM(unpackedExcInfo, 1), &PyTuple_GET_ITEM(unpackedExcInfo, 2));
exc_info = unpackedExcInfo;
} else {
exc_info = Py_NewRef(exc_info_kw);
}
}
PyObject* extra = kwnames != nullptr ? PyArg_GetKeyword(args, npargs, kwnames, self->_const_extra) : nullptr;
if (extra == nullptr){
PyObject* extra_kw = kwnames != nullptr ? PyArg_GetKeyword(args, npargs, kwnames, self->_const_extra) : nullptr;
PyObject* extra;
if (extra_kw == nullptr){
extra = Py_NewRef(Py_None);
} else {
extra = Py_NewRef(extra_kw);
}
PyObject* stack_info = kwnames != nullptr ? PyArg_GetKeyword(args, npargs, kwnames, self->_const_stack_info) : nullptr;
if (stack_info == nullptr){
PyObject* stack_info_kw = kwnames != nullptr ? PyArg_GetKeyword(args, npargs, kwnames, self->_const_stack_info) : nullptr;
PyObject* stack_info;
if (stack_info_kw == nullptr){
stack_info = Py_NewRef(Py_False);
} else {
stack_info = Py_NewRef(stack_info_kw);
}
LogRecord *record = Logger_logMessageAsRecord(
self, level, msg, args_, exc_info, extra, stack_info, 1);
Expand Down
12 changes: 10 additions & 2 deletions src/picologging/streamhandler.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -47,10 +47,18 @@ PyObject* StreamHandler_dealloc(StreamHandler *self) {
/**
 * Flush the handler's underlying stream.
 *
 * Holds the handler's C++ lock around the call to stream.flush().
 * The GIL is released while acquiring/releasing that lock to avoid a
 * GIL-vs-handler-lock deadlock with threads that hold the lock while
 * running Python code.
 *
 * Returns None on success. If stream.flush() raised, returns nullptr
 * with the Python exception left set (the original code returned None
 * with the error indicator set, which violates the CPython calling
 * convention and hid the error).
 */
PyObject* flush (StreamHandler* self){
    if (!self->stream_has_flush)
        Py_RETURN_NONE;

    // Release GIL before acquiring C++ lock to prevent deadlock.
    PyThreadState *_save = PyEval_SaveThread();
    self->handler.lock->lock();
    PyEval_RestoreThread(_save);

    PyObject* result = PyObject_CallMethod_NOARGS(self->stream, self->_const_flush);
    const bool flushFailed = (result == nullptr);  // exception already set by the call
    Py_XDECREF(result);

    // Release GIL before releasing C++ lock; always unlock, even on failure.
    _save = PyEval_SaveThread();
    self->handler.lock->unlock();
    PyEval_RestoreThread(_save);

    if (flushFailed)
        return nullptr;  // propagate the exception raised by stream.flush()
    Py_RETURN_NONE;
}

Expand Down
125 changes: 125 additions & 0 deletions tests/unit/test_memory_leak.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
#!/usr/bin/env python3
"""
Test for memory leak fix in picologging issue #223.
"""
import unittest
import sys
import tracemalloc
import picologging


class TestMemoryLeak(unittest.TestCase):
    """Regression tests for the memory leak fix in picologging issue #223.

    Each test logs a batch of messages while tracemalloc is tracing and
    asserts that the traced allocation total stays under a generous
    threshold; a per-record reference leak would far exceed these limits.
    """

    # Generous upper bounds on traced memory (bytes).
    BASIC_LIMIT = 5 * 1024 * 1024        # ~1000 plain records
    STACK_INFO_LIMIT = 10 * 1024 * 1024  # stack capture is much more expensive

    def setUp(self):
        # Route records to stdout so emit() runs end to end.
        picologging.basicConfig(
            level=picologging.INFO,
            stream=sys.stdout,
            format="%(asctime)s - %(levelname)s - %(message)s"
        )

    def _traced_current(self, log_fn):
        """Run log_fn under tracemalloc and return the current traced bytes.

        tracemalloc is always stopped again (even if log_fn raises) so a
        failing test cannot leave tracing enabled and skew later tests.
        Note: no snapshots are taken here — tracemalloc.take_snapshot()
        allocations are themselves traced and would inflate the result.
        """
        tracemalloc.start()
        try:
            log_fn()
            current, _peak = tracemalloc.get_traced_memory()
        finally:
            tracemalloc.stop()
        return current

    def test_no_memory_leak_basic_logging(self):
        """Basic logging should not leak memory."""
        logger = picologging.getLogger("test_basic")

        def log_messages():
            for i in range(1000):
                logger.info("test message %d", i)

        current = self._traced_current(log_messages)
        self.assertLess(current, self.BASIC_LIMIT,
                        "Memory usage too high, possible leak detected")

    def test_no_memory_leak_with_args(self):
        """Logging with %-style arguments should not leak memory."""
        logger = picologging.getLogger("test_args")

        def log_messages():
            for i in range(1000):
                logger.info("test message %s with number %d", "hello" * 10, i)

        current = self._traced_current(log_messages)
        self.assertLess(current, self.BASIC_LIMIT,
                        "Memory usage too high, possible leak detected")

    def test_no_memory_leak_with_stack_info(self):
        """Logging with stack_info=True (the main source of the leak)."""
        logger = picologging.getLogger("test_stack")

        def log_messages():
            # Fewer iterations since capturing stack info is expensive.
            for i in range(100):
                logger.info("test message %d", i, stack_info=True)

        current = self._traced_current(log_messages)
        self.assertLess(current, self.STACK_INFO_LIMIT,
                        "Memory usage too high, possible leak detected")

    def test_no_memory_leak_multiple_loggers(self):
        """Interleaved logging across several loggers should not leak."""
        # Multiple loggers, similar to the original issue report.
        loggers = [
            picologging.getLogger("logger1"),
            picologging.getLogger("logger2"),
            picologging.getLogger("logger3"),
            picologging.getLogger("logger4"),
        ]

        def log_messages():
            for i in range(250):  # 250 * 4 = 1000 total messages
                for logger in loggers:
                    logger.info("message %d from %s", i, logger.name)

        current = self._traced_current(log_messages)
        self.assertLess(current, self.BASIC_LIMIT,
                        "Memory usage too high, possible leak detected")


# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
Loading