freedreno: split out fd_bo_cache

Eventually we'll want a separate bo-cache for ringbuffer bo's; since
ringbuffer bo's get vmap'd on the kernel side, it is preferable to
reuse them as ringbuffers rather than as something else.  It should
also help to add madvise support if the cache is a bit better
decoupled from bo allocation (next patch).

Signed-off-by: Rob Clark <robclark@freedesktop.org>
Rob Clark 2016-05-30 11:49:39 -04:00
parent 2ca73c666a
commit b18b6e21fc
3 changed files with 37 additions and 34 deletions
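The shape this refactor is aiming for: every cache entry point now takes a
struct fd_bo_cache rather than the whole device, so a second cache (e.g. the
ringbuffer cache the message mentions) becomes one more member plus one more
init/cleanup call. A rough sketch of that eventual end state against the
patched tree; the ring_cache member and the *_sketch functions are
hypothetical, not part of this patch:

    #include <stdlib.h>

    struct fd_device {
    	/* ... existing members, abridged ... */
    	struct fd_bo_cache bo_cache;
    	struct fd_bo_cache ring_cache;	/* hypothetical: cache only for ringbuffer bo's */
    };

    static struct fd_device * device_new_sketch(int fd)
    {
    	struct fd_device *dev = calloc(1, sizeof(*dev));
    	if (!dev)
    		return NULL;
    	dev->fd = fd;
    	fd_bo_cache_init(&dev->bo_cache);
    	fd_bo_cache_init(&dev->ring_cache);	/* second cache: same code, different instance */
    	return dev;
    }

    static void device_del_sketch(struct fd_device *dev)
    {
    	/* time==0 means "free everything regardless of age" */
    	fd_cleanup_bo_cache(&dev->bo_cache, 0);
    	fd_cleanup_bo_cache(&dev->ring_cache, 0);
    	free(dev);
    }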

freedreno/freedreno_bo.c

@@ -84,15 +84,15 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
 }
 
 /* Frees older cached buffers. Called under table_lock */
-drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
+drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time)
 {
 	int i;
 
-	if (dev->time == time)
+	if (cache->time == time)
 		return;
 
-	for (i = 0; i < dev->num_buckets; i++) {
-		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
+	for (i = 0; i < cache->num_buckets; i++) {
+		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
 		struct fd_bo *bo;
 
 		while (!LIST_IS_EMPTY(&bucket->list)) {
@@ -107,18 +107,18 @@ drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
 		}
 	}
 
-	dev->time = time;
+	cache->time = time;
 }
 
-static struct fd_bo_bucket * get_bucket(struct fd_device *dev, uint32_t size)
+static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
 {
 	int i;
 
 	/* hmm, this is what intel does, but I suppose we could calculate our
 	 * way to the correct bucket size rather than looping..
 	 */
-	for (i = 0; i < dev->num_buckets; i++) {
-		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
+	for (i = 0; i < cache->num_buckets; i++) {
+		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
 		if (bucket->size >= size) {
 			return bucket;
 		}
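The "calculate our way to the correct bucket size" idea from the comment
above is feasible because the ladder built by fd_bo_cache_init() is regular:
4k, 8k, 12k, then four quarter steps per power of two starting at 16k. A
sketch of the closed-form lookup, assuming page-aligned sizes as fd_bo_new()
guarantees; bucket_index is a hypothetical helper, not something this patch
adds:

    #include <stdint.h>

    /* hypothetical O(1) replacement for get_bucket()'s linear scan;
     * returns an index into cache->cache_bucket[] (caller still needs
     * to check it against cache->num_buckets for oversized requests)
     */
    static int bucket_index(uint32_t size)
    {
    	if (size <= 3 * 4096)		/* first three buckets: 4k, 8k, 12k */
    		return ((int)size - 1) / 4096;

    	/* above 12k: for each power of two b >= 16k, the ladder holds
    	 * b, b*5/4, b*6/4 and b*7/4
    	 */
    	int log2b = 31 - __builtin_clz(size);		/* floor(log2(size)) */
    	uint32_t b = 1u << log2b;
    	int group = log2b - 14;				/* 16k == 1 << 14 */
    	int step = (size - b + b / 4 - 1) / (b / 4);	/* ceil((size - b) / (b / 4)) */

    	return 3 + 4 * group + step;
    }

For example, a 24k request lands in group 0 at step 2, i.e. index 5, which is
the 24576-byte bucket.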
@@ -135,8 +135,7 @@ static int is_idle(struct fd_bo *bo)
 			DRM_FREEDRENO_PREP_NOSYNC) == 0;
 }
 
-static struct fd_bo *find_in_bucket(struct fd_device *dev,
-		struct fd_bo_bucket *bucket, uint32_t flags)
+static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
 {
 	struct fd_bo *bo = NULL;
 
@@ -179,12 +178,12 @@ fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
 	int ret;
 
 	size = ALIGN(size, 4096);
-	bucket = get_bucket(dev, size);
+	bucket = get_bucket(&dev->bo_cache, size);
 
 	/* see if we can be green and recycle: */
 	if (bucket) {
 		size = bucket->size;
-		bo = find_in_bucket(dev, bucket, flags);
+		bo = find_in_bucket(bucket, flags);
 		if (bo) {
 			atomic_set(&bo->refcnt, 1);
 			fd_device_ref(bo->dev);
@@ -301,7 +300,7 @@ void fd_bo_del(struct fd_bo *bo)
 	pthread_mutex_lock(&table_lock);
 
 	if (bo->bo_reuse) {
-		struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);
+		struct fd_bo_bucket *bucket = get_bucket(&dev->bo_cache, bo->size);
 
 		/* see if we can be green and recycle: */
 		if (bucket) {
@@ -311,7 +310,7 @@ void fd_bo_del(struct fd_bo *bo)
 			bo->free_time = time.tv_sec;
 			list_addtail(&bo->list, &bucket->list);
 
-			fd_cleanup_bo_cache(dev, time.tv_sec);
+			fd_cleanup_bo_cache(&dev->bo_cache, time.tv_sec);
 
 			/* bo's in the bucket cache don't have a ref and
 			 * don't hold a ref to the dev:

freedreno/freedreno_device.c

@@ -43,19 +43,19 @@ struct fd_device * kgsl_device_new(int fd);
 struct fd_device * msm_device_new(int fd);
 
 static void
-add_bucket(struct fd_device *dev, int size)
+add_bucket(struct fd_bo_cache *cache, int size)
 {
-	unsigned int i = dev->num_buckets;
+	unsigned int i = cache->num_buckets;
 
-	assert(i < ARRAY_SIZE(dev->cache_bucket));
+	assert(i < ARRAY_SIZE(cache->cache_bucket));
 
-	list_inithead(&dev->cache_bucket[i].list);
-	dev->cache_bucket[i].size = size;
-	dev->num_buckets++;
+	list_inithead(&cache->cache_bucket[i].list);
+	cache->cache_bucket[i].size = size;
+	cache->num_buckets++;
 }
 
 static void
-init_cache_buckets(struct fd_device *dev)
+fd_bo_cache_init(struct fd_bo_cache *cache)
 {
 	unsigned long size, cache_max_size = 64 * 1024 * 1024;
 
@@ -67,16 +67,16 @@ init_cache_buckets(struct fd_device *dev)
 	 * width/height alignment and rounding of sizes to pages will
 	 * get us useful cache hit rates anyway)
 	 */
 
-	add_bucket(dev, 4096);
-	add_bucket(dev, 4096 * 2);
-	add_bucket(dev, 4096 * 3);
+	add_bucket(cache, 4096);
+	add_bucket(cache, 4096 * 2);
+	add_bucket(cache, 4096 * 3);
 
 	/* Initialize the linked lists for BO reuse cache. */
 	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
-		add_bucket(dev, size);
-		add_bucket(dev, size + size * 1 / 4);
-		add_bucket(dev, size + size * 2 / 4);
-		add_bucket(dev, size + size * 3 / 4);
+		add_bucket(cache, size);
+		add_bucket(cache, size + size * 1 / 4);
+		add_bucket(cache, size + size * 2 / 4);
+		add_bucket(cache, size + size * 3 / 4);
 	}
 }
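For reference, that loop (together with the three small buckets) produces the
ladder 4k, 8k, 12k, 16k, 20k, 24k, 28k, 32k, 40k, ... doubling up to the
64MB cap: 3 + 13 * 4 = 55 buckets, which fits the 14 * 4 = 56 slots declared
for cache_bucket[]. A throwaway sketch that mirrors the loop above and prints
the ladder:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long size, cache_max_size = 64 * 1024 * 1024;

    	/* the three small buckets added explicitly */
    	printf("%d %d %d\n", 4096, 4096 * 2, 4096 * 3);

    	/* four quarter-step buckets per power of two, as above */
    	for (size = 4 * 4096; size <= cache_max_size; size *= 2)
    		printf("%lu %lu %lu %lu\n", size,
    				size + size * 1 / 4,
    				size + size * 2 / 4,
    				size + size * 3 / 4);
    	return 0;
    }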
@@ -113,7 +113,7 @@ struct fd_device * fd_device_new(int fd)
 	dev->fd = fd;
 	dev->handle_table = drmHashCreate();
 	dev->name_table = drmHashCreate();
-	init_cache_buckets(dev);
+	fd_bo_cache_init(&dev->bo_cache);
 
 	return dev;
 }
@@ -137,7 +137,7 @@ struct fd_device * fd_device_ref(struct fd_device *dev)
 
 static void fd_device_del_impl(struct fd_device *dev)
 {
-	fd_cleanup_bo_cache(dev, 0);
+	fd_cleanup_bo_cache(&dev->bo_cache, 0);
 	drmHashDestroy(dev->handle_table);
 	drmHashDestroy(dev->name_table);
 	if (dev->closefd)

freedreno/freedreno_priv.h

@@ -68,6 +68,12 @@ struct fd_bo_bucket {
 	struct list_head list;
 };
 
+struct fd_bo_cache {
+	struct fd_bo_bucket cache_bucket[14 * 4];
+	int num_buckets;
+	time_t time;
+};
+
 struct fd_device {
 	int fd;
 	atomic_t refcnt;
@@ -85,14 +91,12 @@ struct fd_device {
 	const struct fd_device_funcs *funcs;
 
-	struct fd_bo_bucket cache_bucket[14 * 4];
-	int num_buckets;
-	time_t time;
+	struct fd_bo_cache bo_cache;
 
 	int closefd;	/* call close(fd) upon destruction */
 };
 
-drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time);
+drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time);
 
 /* for where @table_lock is already held: */
 drm_private void fd_device_del_locked(struct fd_device *dev);