freedreno: split out fd_bo_cache
Eventually we'll want a separate bo-cache for ringbuffer bo's: since ringbuffer bo's get vmap'd on the kernel side, it is preferable to re-use them as ringbuffers rather than as something else. Plus it should help to add madvise support (next patch) if the cache is a bit better decoupled from bo allocation.

Signed-off-by: Rob Clark <robclark@freedesktop.org>
parent 2ca73c666a
commit b18b6e21fc
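In short: the bucket array, bucket count, and last-sweep timestamp move out of struct fd_device into a new struct fd_bo_cache, and the cache helpers take the cache rather than the device. A minimal compilable sketch of the resulting shape (the list_head stub is a stand-in for libdrm's list type, and drm_private is dropped for brevity):

#include <time.h>
#include <stdint.h>

/* Stand-in for libdrm's embedded doubly-linked list head. */
struct list_head { struct list_head *prev, *next; };

/* A bucket caches freed bo's of one rounded-up size (see freedreno_priv.h). */
struct fd_bo_bucket {
	uint32_t size;
	struct list_head list;
};

/* The state this patch splits out of fd_device. */
struct fd_bo_cache {
	struct fd_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;		/* second of the last cleanup sweep */
};

/* The cache entry points now take a cache pointer, so a second instance
 * (e.g. one dedicated to ringbuffer bo's) can reuse them unchanged.
 */
void fd_bo_cache_init(struct fd_bo_cache *cache);
void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time);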
--- a/freedreno/freedreno_bo.c
+++ b/freedreno/freedreno_bo.c
@@ -84,15 +84,15 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
 }
 
 /* Frees older cached buffers. Called under table_lock */
-drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
+drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time)
 {
 	int i;
 
-	if (dev->time == time)
+	if (cache->time == time)
 		return;
 
-	for (i = 0; i < dev->num_buckets; i++) {
-		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
+	for (i = 0; i < cache->num_buckets; i++) {
+		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
 		struct fd_bo *bo;
 
 		while (!LIST_IS_EMPTY(&bucket->list)) {
@@ -107,18 +107,18 @@ drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
 		}
 	}
 
-	dev->time = time;
+	cache->time = time;
 }
 
-static struct fd_bo_bucket * get_bucket(struct fd_device *dev, uint32_t size)
+static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
 {
 	int i;
 
 	/* hmm, this is what intel does, but I suppose we could calculate our
 	 * way to the correct bucket size rather than looping..
 	 */
-	for (i = 0; i < dev->num_buckets; i++) {
-		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
+	for (i = 0; i < cache->num_buckets; i++) {
+		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
 		if (bucket->size >= size) {
 			return bucket;
 		}
@@ -135,8 +135,7 @@ static int is_idle(struct fd_bo *bo)
 			DRM_FREEDRENO_PREP_NOSYNC) == 0;
 }
 
-static struct fd_bo *find_in_bucket(struct fd_device *dev,
-		struct fd_bo_bucket *bucket, uint32_t flags)
+static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
 {
 	struct fd_bo *bo = NULL;
 
@@ -179,12 +178,12 @@ fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
 	int ret;
 
 	size = ALIGN(size, 4096);
-	bucket = get_bucket(dev, size);
+	bucket = get_bucket(&dev->bo_cache, size);
 
 	/* see if we can be green and recycle: */
 	if (bucket) {
 		size = bucket->size;
-		bo = find_in_bucket(dev, bucket, flags);
+		bo = find_in_bucket(bucket, flags);
 		if (bo) {
 			atomic_set(&bo->refcnt, 1);
 			fd_device_ref(bo->dev);
@@ -301,7 +300,7 @@ void fd_bo_del(struct fd_bo *bo)
 	pthread_mutex_lock(&table_lock);
 
 	if (bo->bo_reuse) {
-		struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);
+		struct fd_bo_bucket *bucket = get_bucket(&dev->bo_cache, bo->size);
 
 		/* see if we can be green and recycle: */
 		if (bucket) {
@@ -311,7 +310,7 @@ void fd_bo_del(struct fd_bo *bo)
 
 			bo->free_time = time.tv_sec;
 			list_addtail(&bo->list, &bucket->list);
-			fd_cleanup_bo_cache(dev, time.tv_sec);
+			fd_cleanup_bo_cache(&dev->bo_cache, time.tv_sec);
 
 			/* bo's in the bucket cache don't have a ref and
 			 * don't hold a ref to the dev:
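One detail worth noting in fd_cleanup_bo_cache() above: the cache->time check makes the sweep run at most once per second, since fd_bo_del() invokes it on every cached free. A small standalone illustration of the same pattern (the struct and function names here are simplified stand-ins, not libdrm API):

#include <stdio.h>
#include <time.h>

/* Simplified stand-in for fd_bo_cache's rate-limiting field. */
struct cache { time_t time; };

static void cleanup(struct cache *c, time_t t)
{
	if (c->time == t)
		return;			/* already swept this second */
	/* (the real code walks each bucket here, freeing cached bo's
	 *  that have sat idle for more than a second) */
	printf("sweep at %ld\n", (long)t);
	c->time = t;
}

int main(void)
{
	struct cache c = { 0 };
	time_t now = time(NULL);
	cleanup(&c, now);		/* performs a sweep */
	cleanup(&c, now);		/* skipped: same second */
	return 0;
}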
--- a/freedreno/freedreno_device.c
+++ b/freedreno/freedreno_device.c
@@ -43,19 +43,19 @@ struct fd_device * kgsl_device_new(int fd);
 struct fd_device * msm_device_new(int fd);
 
 static void
-add_bucket(struct fd_device *dev, int size)
+add_bucket(struct fd_bo_cache *cache, int size)
 {
-	unsigned int i = dev->num_buckets;
+	unsigned int i = cache->num_buckets;
 
-	assert(i < ARRAY_SIZE(dev->cache_bucket));
+	assert(i < ARRAY_SIZE(cache->cache_bucket));
 
-	list_inithead(&dev->cache_bucket[i].list);
-	dev->cache_bucket[i].size = size;
-	dev->num_buckets++;
+	list_inithead(&cache->cache_bucket[i].list);
+	cache->cache_bucket[i].size = size;
+	cache->num_buckets++;
 }
 
 static void
-init_cache_buckets(struct fd_device *dev)
+fd_bo_cache_init(struct fd_bo_cache *cache)
 {
 	unsigned long size, cache_max_size = 64 * 1024 * 1024;
 
@@ -67,16 +67,16 @@ init_cache_buckets(struct fd_device *dev)
 	 * width/height alignment and rounding of sizes to pages will
 	 * get us useful cache hit rates anyway)
 	 */
-	add_bucket(dev, 4096);
-	add_bucket(dev, 4096 * 2);
-	add_bucket(dev, 4096 * 3);
+	add_bucket(cache, 4096);
+	add_bucket(cache, 4096 * 2);
+	add_bucket(cache, 4096 * 3);
 
 	/* Initialize the linked lists for BO reuse cache. */
 	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
-		add_bucket(dev, size);
-		add_bucket(dev, size + size * 1 / 4);
-		add_bucket(dev, size + size * 2 / 4);
-		add_bucket(dev, size + size * 3 / 4);
+		add_bucket(cache, size);
+		add_bucket(cache, size + size * 1 / 4);
+		add_bucket(cache, size + size * 2 / 4);
+		add_bucket(cache, size + size * 3 / 4);
 	}
 }
 
@@ -113,7 +113,7 @@ struct fd_device * fd_device_new(int fd)
 	dev->fd = fd;
 	dev->handle_table = drmHashCreate();
 	dev->name_table = drmHashCreate();
-	init_cache_buckets(dev);
+	fd_bo_cache_init(&dev->bo_cache);
 
 	return dev;
 }
@@ -137,7 +137,7 @@ struct fd_device * fd_device_ref(struct fd_device *dev)
 
 static void fd_device_del_impl(struct fd_device *dev)
 {
-	fd_cleanup_bo_cache(dev, 0);
+	fd_cleanup_bo_cache(&dev->bo_cache, 0);
 	drmHashDestroy(dev->handle_table);
 	drmHashDestroy(dev->name_table);
 	if (dev->closefd)
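The bucket layout built by fd_bo_cache_init() is three page-sized buckets (4K, 8K, 12K) and then, for each power of two up to 64MiB, four buckets spaced a quarter step apart: 3 + 13 * 4 = 55 buckets, which is why the array is dimensioned 14 * 4. A standalone reproduction of the size progression and the get_bucket() lookup (helper names here are local to the demo):

#include <stdint.h>
#include <stdio.h>

static uint32_t bucket_size[14 * 4];
static int num_buckets;

static void add_bucket_size(uint32_t size)
{
	bucket_size[num_buckets++] = size;
}

/* Mirrors get_bucket(): first bucket at least as large as the request. */
static uint32_t bucket_for(uint32_t size)
{
	for (int i = 0; i < num_buckets; i++)
		if (bucket_size[i] >= size)
			return bucket_size[i];
	return 0;	/* larger than cache_max_size: not cached */
}

int main(void)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* same progression as fd_bo_cache_init() */
	add_bucket_size(4096);
	add_bucket_size(4096 * 2);
	add_bucket_size(4096 * 3);
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket_size(size);
		add_bucket_size(size + size * 1 / 4);
		add_bucket_size(size + size * 2 / 4);
		add_bucket_size(size + size * 3 / 4);
	}

	/* e.g. a 100KiB request rounds up to the 112KiB bucket */
	printf("%d buckets; 100KiB -> %u bytes\n",
	       num_buckets, bucket_for(100 * 1024));
	return 0;
}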
--- a/freedreno/freedreno_priv.h
+++ b/freedreno/freedreno_priv.h
@@ -68,6 +68,12 @@ struct fd_bo_bucket {
 	struct list_head list;
 };
 
+struct fd_bo_cache {
+	struct fd_bo_bucket cache_bucket[14 * 4];
+	int num_buckets;
+	time_t time;
+};
+
 struct fd_device {
 	int fd;
 	atomic_t refcnt;
@@ -85,14 +91,12 @@ struct fd_device {
 
 	const struct fd_device_funcs *funcs;
 
-	struct fd_bo_bucket cache_bucket[14 * 4];
-	int num_buckets;
-	time_t time;
+	struct fd_bo_cache bo_cache;
 
 	int closefd;        /* call close(fd) upon destruction */
 };
 
-drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time);
+drm_private void fd_cleanup_bo_cache(struct fd_bo_cache *cache, time_t time);
 
 /* for where @table_lock is already held: */
 drm_private void fd_device_del_locked(struct fd_device *dev);
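With the header change in place, the separate ringbuffer cache the commit message promises becomes a one-field addition. A hypothetical sketch, not part of this patch (the ring_cache field, the helper, and the struct name are assumptions; it reuses struct fd_bo_cache from the sketch near the top):

/* Hypothetical follow-up enabled by this split: a second cache instance
 * dedicated to ringbuffer bo's, so buffers the kernel has vmap'd get
 * recycled as ringbuffers rather than as ordinary bo's.
 */
struct fd_device_sketch {
	int fd;
	struct fd_bo_cache bo_cache;	/* ordinary bo's (this patch) */
	struct fd_bo_cache ring_cache;	/* ringbuffer bo's (assumed follow-up) */
};

static void device_init_caches(struct fd_device_sketch *dev)
{
	fd_bo_cache_init(&dev->bo_cache);	/* as in fd_device_new() */
	fd_bo_cache_init(&dev->ring_cache);	/* same init, separate buckets */
}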