 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include <stdint.h>
#include <string.h>

#include "atomic.h"
#include "buffer_internal.h"
#include "common.h"
#include "mem.h"
#include "thread.h"
28 AVBufferRef
*av_buffer_create(uint8_t *data
, int size
,
29 void (*free
)(void *opaque
, uint8_t *data
),
30 void *opaque
, int flags
)
32 AVBufferRef
*ref
= NULL
;
35 buf
= av_mallocz(sizeof(*buf
));
41 buf
->free
= free
? free
: av_buffer_default_free
;
45 if (flags
& AV_BUFFER_FLAG_READONLY
)
46 buf
->flags
|= BUFFER_FLAG_READONLY
;
48 ref
= av_mallocz(sizeof(*ref
));
/**
 * Default data free callback: releases the data array with av_free().
 * Installed by av_buffer_create() when the caller passes no callback;
 * the opaque pointer is unused.
 */
void av_buffer_default_free(void *opaque, uint8_t *data)
{
    av_free(data);
}
66 AVBufferRef
*av_buffer_alloc(int size
)
68 AVBufferRef
*ret
= NULL
;
71 data
= av_malloc(size
);
75 ret
= av_buffer_create(data
, size
, av_buffer_default_free
, NULL
, 0);
82 AVBufferRef
*av_buffer_allocz(int size
)
84 AVBufferRef
*ret
= av_buffer_alloc(size
);
88 memset(ret
->data
, 0, size
);
92 AVBufferRef
*av_buffer_ref(AVBufferRef
*buf
)
94 AVBufferRef
*ret
= av_mallocz(sizeof(*ret
));
101 avpriv_atomic_int_add_and_fetch(&buf
->buffer
->refcount
, 1);
106 void av_buffer_unref(AVBufferRef
**buf
)
115 if (!avpriv_atomic_int_add_and_fetch(&b
->refcount
, -1)) {
116 b
->free(b
->opaque
, b
->data
);
121 int av_buffer_is_writable(const AVBufferRef
*buf
)
123 if (buf
->buffer
->flags
& AV_BUFFER_FLAG_READONLY
)
126 return avpriv_atomic_int_get(&buf
->buffer
->refcount
) == 1;
129 void *av_buffer_get_opaque(const AVBufferRef
*buf
)
131 return buf
->buffer
->opaque
;
134 int av_buffer_get_ref_count(const AVBufferRef
*buf
)
136 return buf
->buffer
->refcount
;
139 int av_buffer_make_writable(AVBufferRef
**pbuf
)
141 AVBufferRef
*newbuf
, *buf
= *pbuf
;
143 if (av_buffer_is_writable(buf
))
146 newbuf
= av_buffer_alloc(buf
->size
);
148 return AVERROR(ENOMEM
);
150 memcpy(newbuf
->data
, buf
->data
, buf
->size
);
151 av_buffer_unref(pbuf
);
157 int av_buffer_realloc(AVBufferRef
**pbuf
, int size
)
159 AVBufferRef
*buf
= *pbuf
;
163 /* allocate a new buffer with av_realloc(), so it will be reallocatable
165 uint8_t *data
= av_realloc(NULL
, size
);
167 return AVERROR(ENOMEM
);
169 buf
= av_buffer_create(data
, size
, av_buffer_default_free
, NULL
, 0);
172 return AVERROR(ENOMEM
);
175 buf
->buffer
->flags
|= BUFFER_FLAG_REALLOCATABLE
;
179 } else if (buf
->size
== size
)
182 if (!(buf
->buffer
->flags
& BUFFER_FLAG_REALLOCATABLE
) ||
183 !av_buffer_is_writable(buf
)) {
184 /* cannot realloc, allocate a new reallocable buffer and copy data */
185 AVBufferRef
*new = NULL
;
187 av_buffer_realloc(&new, size
);
189 return AVERROR(ENOMEM
);
191 memcpy(new->data
, buf
->data
, FFMIN(size
, buf
->size
));
193 av_buffer_unref(pbuf
);
198 tmp
= av_realloc(buf
->buffer
->data
, size
);
200 return AVERROR(ENOMEM
);
202 buf
->buffer
->data
= buf
->data
= tmp
;
203 buf
->buffer
->size
= buf
->size
= size
;
207 AVBufferPool
*av_buffer_pool_init(int size
, AVBufferRef
* (*alloc
)(int size
))
209 AVBufferPool
*pool
= av_mallocz(sizeof(*pool
));
213 ff_mutex_init(&pool
->mutex
, NULL
);
216 pool
->alloc
= alloc
? alloc
: av_buffer_alloc
;
218 avpriv_atomic_int_set(&pool
->refcount
, 1);
224 * This function gets called when the pool has been uninited and
225 * all the buffers returned to it.
227 static void buffer_pool_free(AVBufferPool
*pool
)
230 BufferPoolEntry
*buf
= pool
->pool
;
231 pool
->pool
= buf
->next
;
233 buf
->free(buf
->opaque
, buf
->data
);
236 ff_mutex_destroy(&pool
->mutex
);
240 void av_buffer_pool_uninit(AVBufferPool
**ppool
)
244 if (!ppool
|| !*ppool
)
249 if (!avpriv_atomic_int_add_and_fetch(&pool
->refcount
, -1))
250 buffer_pool_free(pool
);
253 /* remove the whole buffer list from the pool and return it */
254 static BufferPoolEntry
*get_pool(AVBufferPool
*pool
)
256 BufferPoolEntry
*cur
= *(void * volatile *)&pool
->pool
, *last
= NULL
;
258 while (cur
!= last
) {
260 cur
= avpriv_atomic_ptr_cas((void * volatile *)&pool
->pool
, last
, NULL
);
268 static void add_to_pool(BufferPoolEntry
*buf
)
271 BufferPoolEntry
*cur
, *end
= buf
;
280 while (avpriv_atomic_ptr_cas((void * volatile *)&pool
->pool
, NULL
, buf
)) {
281 /* pool is not empty, retrieve it and append it to our list */
282 cur
= get_pool(pool
);
289 static void pool_release_buffer(void *opaque
, uint8_t *data
)
291 BufferPoolEntry
*buf
= opaque
;
292 AVBufferPool
*pool
= buf
->pool
;
294 if(CONFIG_MEMORY_POISONING
)
295 memset(buf
->data
, FF_MEMORY_POISON
, pool
->size
);
300 ff_mutex_lock(&pool
->mutex
);
301 buf
->next
= pool
->pool
;
303 ff_mutex_unlock(&pool
->mutex
);
306 if (!avpriv_atomic_int_add_and_fetch(&pool
->refcount
, -1))
307 buffer_pool_free(pool
);
310 /* allocate a new buffer and override its free() callback so that
311 * it is returned to the pool on free */
312 static AVBufferRef
*pool_alloc_buffer(AVBufferPool
*pool
)
314 BufferPoolEntry
*buf
;
317 ret
= pool
->alloc(pool
->size
);
321 buf
= av_mallocz(sizeof(*buf
));
323 av_buffer_unref(&ret
);
327 buf
->data
= ret
->buffer
->data
;
328 buf
->opaque
= ret
->buffer
->opaque
;
329 buf
->free
= ret
->buffer
->free
;
332 ret
->buffer
->opaque
= buf
;
333 ret
->buffer
->free
= pool_release_buffer
;
336 avpriv_atomic_int_add_and_fetch(&pool
->refcount
, 1);
337 avpriv_atomic_int_add_and_fetch(&pool
->nb_allocated
, 1);
343 AVBufferRef
*av_buffer_pool_get(AVBufferPool
*pool
)
346 BufferPoolEntry
*buf
;
349 /* check whether the pool is empty */
350 buf
= get_pool(pool
);
351 if (!buf
&& pool
->refcount
<= pool
->nb_allocated
) {
352 av_log(NULL
, AV_LOG_DEBUG
, "Pool race dectected, spining to avoid overallocation and eventual OOM\n");
353 while (!buf
&& avpriv_atomic_int_get(&pool
->refcount
) <= avpriv_atomic_int_get(&pool
->nb_allocated
))
354 buf
= get_pool(pool
);
358 return pool_alloc_buffer(pool
);
360 /* keep the first entry, return the rest of the list to the pool */
361 add_to_pool(buf
->next
);
364 ret
= av_buffer_create(buf
->data
, pool
->size
, pool_release_buffer
,
371 ff_mutex_lock(&pool
->mutex
);
374 ret
= av_buffer_create(buf
->data
, pool
->size
, pool_release_buffer
,
377 pool
->pool
= buf
->next
;
381 ret
= pool_alloc_buffer(pool
);
383 ff_mutex_unlock(&pool
->mutex
);
387 avpriv_atomic_int_add_and_fetch(&pool
->refcount
, 1);