
Commit 9318df9

DinoV and colesbury committed
Bring in a subset of biased reference counting:
colesbury/nogil@b6b12a9a94e Co-Authored-By: Sam Gross <[email protected]>
1 parent 191531f commit 9318df9
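
Note on the technique: biased reference counting splits each object's refcount into a field that only the owning thread touches (with plain loads and stores) and a field that all other threads update atomically. The functions added below consult both. A sketch of the relevant per-object state, with names taken from the code in this diff (the real layout lives in Include/object.h; the ob_tid field is an assumption based on the _Py_IsOwnedByCurrentThread check):

    /* Sketch only, not the real PyObject layout. */
    struct biased_refcount_fields {
        uintptr_t  ob_tid;         /* owning thread id, checked by
                                      _Py_IsOwnedByCurrentThread() */
        uint32_t   ob_ref_local;   /* owner-only count: relaxed loads/stores */
        Py_ssize_t ob_ref_shared;  /* all other threads: atomic CAS updates;
                                      low bits hold flags such as
                                      _Py_REF_MERGED and _Py_REF_MAYBE_WEAKREF */
    };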

File tree

1 file changed (+151 −0)

Include/internal/pycore_object.h

Lines changed: 151 additions & 0 deletions
@@ -376,6 +376,157 @@ static inline void _PyObject_GC_UNTRACK(
        _PyObject_GC_UNTRACK(__FILE__, __LINE__, _PyObject_CAST(op))
#endif

#ifdef Py_GIL_DISABLED

/* Tries to increment an object's reference count
 *
 * This is a specialized version of _Py_TryIncref that only succeeds if the
 * object is immortal or local to this thread. It does not handle the case
 * where the reference count modification requires an atomic operation. This
 * allows call sites to specialize for the immortal/local case.
 */
static inline Py_ALWAYS_INLINE int
_Py_TryIncrefFast(PyObject *op) {
    uint32_t local = _Py_atomic_load_uint32_relaxed(&op->ob_ref_local);
    local += 1;
    if (local == 0) {
        // immortal
        return 1;
    }
    if (_Py_IsOwnedByCurrentThread(op)) {
        _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, local);
#ifdef Py_REF_DEBUG
        _Py_IncRefTotal();
#endif
        return 1;
    }
    return 0;
}
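
Note: the doc comment above references _Py_TryIncref, which composes this fast path with the shared-path fallback defined next. Roughly, as a sketch (the helper name try_incref_sketch is hypothetical, not part of this commit):

    static inline int
    try_incref_sketch(PyObject *op)
    {
        if (_Py_TryIncrefFast(op)) {
            /* Immortal or owned by this thread: no atomic
             * read-modify-write was needed. */
            return 1;
        }
        /* Owned by another thread: fall back to a CAS on
         * ob_ref_shared (see _Py_TryIncRefShared below). */
        return _Py_TryIncRefShared(op);
    }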
static inline Py_ALWAYS_INLINE int
_Py_TryIncRefShared(PyObject *op)
{
    for (;;) {
        Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);

        // If the shared refcount is zero and the object is either merged
        // or may not have weak references, then we cannot incref it.
        if (shared == 0 || shared == _Py_REF_MERGED) {
            return 0;
        }

        if (_Py_atomic_compare_exchange_ssize(
                &op->ob_ref_shared,
                &shared,
                shared + (1 << _Py_REF_SHARED_SHIFT))) {
#ifdef Py_REF_DEBUG
            _Py_IncRefTotal();
#endif
            return 1;
        }
    }
}
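
Note: ob_ref_shared packs flag bits underneath the count, so `shared + (1 << _Py_REF_SHARED_SHIFT)` bumps the count by one while leaving the flags intact. A standalone arithmetic check, with the constants assumed to match Include/object.h in the free-threaded build (shift of 2, flag mask 0x3):

    #include <assert.h>
    #include <stdint.h>

    #define REF_SHARED_SHIFT  2     /* assumed: _Py_REF_SHARED_SHIFT */
    #define REF_FLAG_MASK     0x3   /* assumed: _Py_REF_SHARED_FLAG_MASK */
    #define REF_MAYBE_WEAKREF 0x1   /* assumed: _Py_REF_MAYBE_WEAKREF */

    int main(void)
    {
        intptr_t shared = (2 << REF_SHARED_SHIFT) | REF_MAYBE_WEAKREF; /* 0x09 */
        shared += (intptr_t)1 << REF_SHARED_SHIFT;     /* incref -> 0x0D */
        assert(shared >> REF_SHARED_SHIFT == 3);       /* count went 2 -> 3 */
        assert((shared & REF_FLAG_MASK) == REF_MAYBE_WEAKREF); /* flags kept */
        return 0;
    }

A shared value of exactly _Py_REF_MERGED (flag bits set, count zero) means the local and shared counts were already merged and have reached zero, so the loop above declines to revive the object.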
/* Tries to incref the object op and ensures that *src still points to it. */
static inline int
_Py_TryAcquireObject(PyObject **src, PyObject *op)
{
    if (_Py_TryIncrefFast(op)) {
        return 1;
    }
    if (!_Py_TryIncRefShared(op)) {
        return 0;
    }
    if (op != _Py_atomic_load_ptr(src)) {
        Py_DECREF(op);
        return 0;
    }
    return 1;
}
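
Note: the final re-load of *src is what makes this acquire safe against concurrent replacement of the slot. One interleaving the check catches:

    thread A: op = *src                 (sees object X)
    thread B: *src = Y                  (X stays alive via other owners)
    thread A: _Py_TryIncRefShared(X)    (succeeds)
    thread A: *src != X                 (Py_DECREF(X), return 0)

Without the re-check, thread A would return a strong reference to X even though the slot already holds Y.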
/* Loads and increfs an object from ptr, which may contain a NULL value.
   Safe with concurrent (atomic) updates to ptr.
   NOTE: The writer must set maybe-weakref on the stored object! */
static inline Py_ALWAYS_INLINE PyObject *
_Py_XFetchRef(PyObject **ptr)
{
#ifdef Py_NOGIL
    for (;;) {
        PyObject *value = _Py_atomic_load_ptr(ptr);
        if (value == NULL) {
            return value;
        }
        if (_Py_TryAcquireObject(ptr, value)) {
            return value;
        }
    }
#else
    return Py_XNewRef(*ptr);
#endif
}
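
Two observations. First, the inner guard tests Py_NOGIL, the nogil fork's name for the build mode, while the enclosing block tests Py_GIL_DISABLED; the mismatch appears to be an artifact of porting from colesbury/nogil. Second, this is the reader half of a publish/consume pattern; a sketch of that reader side (the shared_slot type and helper name are hypothetical, and this assumes an internal CPython build where these headers are available):

    typedef struct {
        PyObject *value;    /* replaced atomically by writers */
    } shared_slot;

    static PyObject *
    shared_slot_get(shared_slot *slot)
    {
        /* Returns a new strong reference, or NULL if the slot is empty. */
        return _Py_XFetchRef(&slot->value);
    }

The matching writer must publish values with the maybe-weakref flag set; see the sketch after _Py_NewRefWithLock below.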
/* Attempts to load and incref an object from ptr. Returns NULL
   on failure, which may be due to a NULL value or a concurrent update. */
static inline Py_ALWAYS_INLINE PyObject *
_Py_TryXFetchRef(PyObject **ptr)
{
    PyObject *value = _Py_atomic_load_ptr(ptr);
    if (value == NULL) {
        return value;
    }
    if (_Py_TryAcquireObject(ptr, value)) {
        return value;
    }
    return NULL;
}
/* Like Py_NewRef but also optimistically sets _Py_REF_MAYBE_WEAKREF
   on objects owned by a different thread. */
static inline PyObject *
_Py_NewRefWithLock(PyObject *op)
{
    _Py_INCREF_STAT_INC();
    uint32_t local = _Py_atomic_load_uint32_relaxed(&op->ob_ref_local);
    local += 1;
    if (local == 0) {
        return op;
    }

#ifdef Py_REF_DEBUG
    _Py_IncRefTotal();
#endif
    if (_Py_IsOwnedByCurrentThread(op)) {
        _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, local);
    }
    else {
        for (;;) {
            Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
            Py_ssize_t new_shared = shared + (1 << _Py_REF_SHARED_SHIFT);
            if ((shared & _Py_REF_SHARED_FLAG_MASK) == 0) {
                new_shared |= _Py_REF_MAYBE_WEAKREF;
            }
            if (_Py_atomic_compare_exchange_ssize(
                    &op->ob_ref_shared,
                    &shared,
                    new_shared)) {
                return op;
            }
        }
    }
    return op;
}
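
Note: this is the writer-side counterpart to _Py_XFetchRef above; setting _Py_REF_MAYBE_WEAKREF on cross-thread objects is what licenses readers to attempt speculative increfs. Continuing the earlier hypothetical shared_slot sketch (the caller is assumed to hold whatever lock serializes writers, per the function's name, and _Py_atomic_store_ptr is assumed from Include/cpython/pyatomic.h):

    static void
    shared_slot_set(shared_slot *slot, PyObject *obj)
    {
        PyObject *old = slot->value;   /* safe: writers are serialized */
        /* Publish with maybe-weakref set so concurrent _Py_XFetchRef
         * readers may safely try to incref the stored object.
         * _Py_XNewRefWithLock (defined just below) tolerates NULL. */
        _Py_atomic_store_ptr(&slot->value, _Py_XNewRefWithLock(obj));
        Py_XDECREF(old);
    }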
static inline PyObject *
_Py_XNewRefWithLock(PyObject *obj)
{
    if (obj == NULL) {
        return NULL;
    }
    return _Py_NewRefWithLock(obj);
}

#endif

#ifdef Py_REF_DEBUG
extern void _PyInterpreterState_FinalizeRefTotal(PyInterpreterState *);
extern void _Py_FinalizeRefTotal(_PyRuntimeState *);
