--- a/libavutil/buffer.c
+++ b/libavutil/buffer.c
@@ -272,7 +272,7 @@ AVBufferPool *av_buffer_pool_init2(size_t size, void *opaque,
pool->alloc = av_buffer_alloc; // fallback
pool->pool_free = pool_free;
- atomic_init(&pool->refcount, 1);
+ pool->refcount = 1;
return pool;
}
@@ -288,7 +288,7 @@ AVBufferPool *av_buffer_pool_init(size_t size, AVBufferRef* (*alloc)(size_t size
pool->size = size;
pool->alloc = alloc ? alloc : av_buffer_alloc;
- atomic_init(&pool->refcount, 1);
+ pool->refcount = 1;
return pool;
}
@@ -322,6 +322,7 @@ static void buffer_pool_free(AVBufferPool *pool)
void av_buffer_pool_uninit(AVBufferPool **ppool)
{
AVBufferPool *pool;
+ unsigned refcount;
if (!ppool || !*ppool)
return;
@@ -330,9 +331,10 @@ void av_buffer_pool_uninit(AVBufferPool **ppool)
ff_mutex_lock(&pool->mutex);
buffer_pool_flush(pool);
+ refcount = --pool->refcount;
ff_mutex_unlock(&pool->mutex);
- if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
+ if (!refcount)
buffer_pool_free(pool);
}
@@ -340,13 +342,15 @@ static void pool_release_buffer(void *opaque, uint8_t *data)
{
BufferPoolEntry *buf = opaque;
AVBufferPool *pool = buf->pool;
+ unsigned refcount;
ff_mutex_lock(&pool->mutex);
buf->next = pool->pool;
pool->pool = buf;
+ refcount = --pool->refcount;
ff_mutex_unlock(&pool->mutex);
- if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
+ if (!refcount)
buffer_pool_free(pool);
}
@@ -400,10 +404,9 @@ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
} else {
ret = pool_alloc_buffer(pool);
}
- ff_mutex_unlock(&pool->mutex);
-
if (ret)
- atomic_fetch_add_explicit(&pool->refcount, 1, memory_order_relaxed);
+ pool->refcount++;
+ ff_mutex_unlock(&pool->mutex);
return ret;
}
--- a/libavutil/buffer_internal.h
+++ b/libavutil/buffer_internal.h
@@ -99,7 +99,7 @@ struct AVBufferPool {
* buffers have been released, then it's safe to free the pool and all
* the buffers in it.
*/
- atomic_uint refcount;
+ unsigned refcount;
size_t size;
void *opaque;
The AVBufferPool is already guarded by a mutex, so one can just use said
mutex to also protect modifying the refcount. Doing so yields a minor
decrease in complexity.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
Currently freeing the entries is serialized; the API documentation does
not force us to do so ("is thread-safe as long as either the default
alloc callback is used, or the user-supplied one is thread-safe"). I
could easily remove this serialization if desired, yet it would be
incompatible with this patch.

 libavutil/buffer.c          | 17 ++++++++++-------
 libavutil/buffer_internal.h |  2 +-
 2 files changed, 11 insertions(+), 8 deletions(-)
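For readers skimming the patch, the scheme in isolation looks roughly like
the sketch below. This is a minimal, self-contained illustration rather than
the actual FFmpeg code: the Pool/pool_unref names are hypothetical, and raw
pthreads stand in for FFmpeg's internal ff_mutex wrapper. The invariant is
that refcount is only read or written while the mutex is held, so a plain
unsigned suffices and no atomics are needed.

#include <pthread.h>
#include <stdlib.h>

typedef struct Pool {
    pthread_mutex_t mutex;
    unsigned        refcount; /* guarded by mutex; no atomics needed */
} Pool;

static void pool_free(Pool *pool)
{
    pthread_mutex_destroy(&pool->mutex);
    free(pool);
}

/* Drop one reference; free the pool if it was the last one. */
static void pool_unref(Pool *pool)
{
    unsigned refcount;

    pthread_mutex_lock(&pool->mutex);
    refcount = --pool->refcount; /* plain decrement under the lock */
    pthread_mutex_unlock(&pool->mutex);

    if (!refcount) /* this thread held the last reference */
        pool_free(pool);
}

int main(void)
{
    Pool *pool = malloc(sizeof(*pool));
    if (!pool)
        return 1;
    pthread_mutex_init(&pool->mutex, NULL);
    pool->refcount = 1; /* no other thread can see the pool yet */
    pool_unref(pool);   /* drops the count to zero and frees */
    return 0;
}

The important detail is reading the decremented value into a local before
unlocking: the post-unlock check must use the value observed under the lock,
since another thread may drop its own reference immediately after the unlock.
Once the count reaches zero it can never rise again, because taking a new
reference (as av_buffer_pool_get does) requires already holding one, so
exactly one thread observes zero and frees the pool. The same reasoning is
why the increment in av_buffer_pool_get moves before the unlock in this
patch.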