freedreno: refactor bo-cache API
Split out the interface to allocate from and release to the bo-cache, and get
rid of direct usage of the bucket-level API from fd_bo/etc.

Signed-off-by: Rob Clark <robclark@freedesktop.org>

parent b18b6e21fc
commit 0b34b68307
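In rough terms, the generic fd_bo code now talks only to the fd_bo_cache_* entry points, while the bucket-level helpers (get_bucket, find_in_bucket, ...) stay internal to the cache. Below is a minimal sketch of the resulting call pattern, condensed from the hunks that follow; locking around table_lock is elided, and kernel_bo_new() is a hypothetical stand-in for the device-specific dev->funcs->bo_new_handle() path, not a real function in the tree.

/* Sketch only: mirrors the new fd_bo_new()/fd_bo_del() flow after the
 * refactor.  kernel_bo_new() is a made-up placeholder for the backend
 * allocation path; the fd_bo_cache_* calls are the new API from the diff.
 */
struct fd_bo *
example_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
        /* try to recycle a cached bo first; note that size may be
         * rounded up to the bucket size by fd_bo_cache_alloc():
         */
        struct fd_bo *bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
        if (bo)
                return bo;

        /* otherwise fall back to allocating a fresh buffer from the kernel */
        return kernel_bo_new(dev, size, flags);
}

void
example_bo_del(struct fd_bo *bo)
{
        /* hand the buffer back to the cache; only if the cache refuses it
         * (non-zero return) does the caller actually destroy the bo:
         */
        if (bo->bo_reuse && (fd_bo_cache_free(&bo->dev->bo_cache, bo) == 0))
                return;

        bo_del(bo);
}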
@@ -84,7 +84,8 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
 }
 
 /* Frees older cached buffers. Called under table_lock */
-drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time)
+drm_private void
+fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
 {
         int i;
 
@@ -168,21 +169,19 @@ static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
         return bo;
 }
 
-
-struct fd_bo *
-fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
+/* NOTE: size is potentially rounded up to bucket size: */
+drm_private struct fd_bo *
+fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
 {
         struct fd_bo *bo = NULL;
         struct fd_bo_bucket *bucket;
-        uint32_t handle;
-        int ret;
 
-        size = ALIGN(size, 4096);
-        bucket = get_bucket(&dev->bo_cache, size);
+        *size = ALIGN(*size, 4096);
+        bucket = get_bucket(cache, *size);
 
         /* see if we can be green and recycle: */
         if (bucket) {
-                size = bucket->size;
+                *size = bucket->size;
                 bo = find_in_bucket(bucket, flags);
                 if (bo) {
                         atomic_set(&bo->refcnt, 1);
@@ -191,6 +190,20 @@ fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
                 }
         }
 
+        return NULL;
+}
+
+struct fd_bo *
+fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
+{
+        struct fd_bo *bo = NULL;
+        uint32_t handle;
+        int ret;
+
+        bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
+        if (bo)
+                return bo;
+
         ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
         if (ret)
                 return NULL;
@@ -290,6 +303,32 @@ struct fd_bo * fd_bo_ref(struct fd_bo *bo)
         return bo;
 }
 
+drm_private int
+fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
+{
+        struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
+
+        /* see if we can be green and recycle: */
+        if (bucket) {
+                struct timespec time;
+
+                clock_gettime(CLOCK_MONOTONIC, &time);
+
+                bo->free_time = time.tv_sec;
+                list_addtail(&bo->list, &bucket->list);
+                fd_bo_cache_cleanup(cache, time.tv_sec);
+
+                /* bo's in the bucket cache don't have a ref and
+                 * don't hold a ref to the dev:
+                 */
+                fd_device_del_locked(bo->dev);
+
+                return 0;
+        }
+
+        return -1;
+}
+
 void fd_bo_del(struct fd_bo *bo)
 {
         struct fd_device *dev = bo->dev;
@@ -299,30 +338,12 @@ void fd_bo_del(struct fd_bo *bo)
 
         pthread_mutex_lock(&table_lock);
 
-        if (bo->bo_reuse) {
-                struct fd_bo_bucket *bucket = get_bucket(&dev->bo_cache, bo->size);
-
-                /* see if we can be green and recycle: */
-                if (bucket) {
-                        struct timespec time;
-
-                        clock_gettime(CLOCK_MONOTONIC, &time);
-
-                        bo->free_time = time.tv_sec;
-                        list_addtail(&bo->list, &bucket->list);
-                        fd_cleanup_bo_cache(&dev->bo_cache, time.tv_sec);
-
-                        /* bo's in the bucket cache don't have a ref and
-                         * don't hold a ref to the dev:
-                         */
-
-                        goto out;
-                }
-        }
+        if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
+                goto out;
 
         bo_del(bo);
-out:
         fd_device_del_locked(dev);
+out:
         pthread_mutex_unlock(&table_lock);
 }
 
@@ -54,7 +54,7 @@ add_bucket(struct fd_bo_cache *cache, int size)
         cache->num_buckets++;
 }
 
-static void
+drm_private void
 fd_bo_cache_init(struct fd_bo_cache *cache)
 {
         unsigned long size, cache_max_size = 64 * 1024 * 1024;
@@ -137,7 +137,7 @@ struct fd_device * fd_device_ref(struct fd_device *dev)
 
 static void fd_device_del_impl(struct fd_device *dev)
 {
-        fd_cleanup_bo_cache(&dev->bo_cache, 0);
+        fd_bo_cache_cleanup(&dev->bo_cache, 0);
         drmHashDestroy(dev->handle_table);
         drmHashDestroy(dev->name_table);
         if (dev->closefd)
@@ -96,7 +96,11 @@ struct fd_device {
         int closefd;         /* call close(fd) upon destruction */
 };
 
-drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time);
+drm_private void fd_bo_cache_init(struct fd_bo_cache *cache);
+drm_private void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
+drm_private struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
+                uint32_t *size, uint32_t flags);
+drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
 
 /* for where @table_lock is already held: */
 drm_private void fd_device_del_locked(struct fd_device *dev);