parent b983b054d4
commit d0dae26ca4
freedreno/Makefile.am

@@ -5,6 +5,7 @@ AM_CFLAGS = \
 	$(WARN_CFLAGS) \
 	-I$(top_srcdir) \
 	$(PTHREADSTUBS_CFLAGS) \
+	$(VALGRIND_CFLAGS) \
 	-I$(top_srcdir)/include/drm
 
 libdrm_freedreno_la_LTLIBRARIES = libdrm_freedreno.la

freedreno/freedreno_bo.c

@@ -102,6 +102,8 @@ fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
 	bo->bo_reuse = TRUE;
 	pthread_mutex_unlock(&table_lock);
 
+	VG_BO_ALLOC(bo);
+
 	return bo;
 }
 
@@ -118,6 +120,8 @@ fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
 
 	bo = bo_from_handle(dev, size, handle);
 
+	VG_BO_ALLOC(bo);
+
 out_unlock:
 	pthread_mutex_unlock(&table_lock);
 
@@ -147,6 +151,8 @@ fd_bo_from_dmabuf(struct fd_device *dev, int fd)
 
 	bo = bo_from_handle(dev, size, handle);
 
+	VG_BO_ALLOC(bo);
+
 out_unlock:
 	pthread_mutex_unlock(&table_lock);
 
@@ -177,8 +183,10 @@ struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
 		goto out_unlock;
 
 	bo = bo_from_handle(dev, req.size, req.handle);
-	if (bo)
+	if (bo) {
 		set_name(bo, name);
+		VG_BO_ALLOC(bo);
+	}
 
 out_unlock:
 	pthread_mutex_unlock(&table_lock);
@@ -213,6 +221,8 @@ out:
 /* Called under table_lock */
 drm_private void bo_del(struct fd_bo *bo)
 {
+	VG_BO_FREE(bo);
+
 	if (bo->map)
 		drm_munmap(bo->map, bo->size);
 

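Note: every path that materializes a bo (fd_bo_new, fd_bo_from_handle, fd_bo_from_dmabuf, fd_bo_from_name) now reports the allocation to memcheck, and bo_del() reports the free. Below is a minimal standalone sketch of the same technique, describing a non-malloc allocation to memcheck so it is tracked like a heap block; the single-slot pool allocator and its names are made up for illustration, not part of this commit:

#include <stddef.h>
#include <valgrind/memcheck.h>

static char pool[4096];

static void *pool_alloc(size_t size)
{
	void *p = pool;		/* single-slot pool, just for the demo */
	/* rzB = 0: no redzone; is_zeroed = 1: static storage starts zeroed */
	VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, 1);
	return p;
}

static void pool_free(void *p)
{
	VALGRIND_FREELIKE_BLOCK(p, 0);	/* must pair with the block above */
}

int main(void)
{
	char *p = pool_alloc(64);
	p[0] = 1;
	pool_free(p);
	/* p[0] = 2; would now be reported like a heap use-after-free */
	return 0;
}

Run under valgrind, a leak of the block or a touch of p after pool_free() is reported exactly as it would be for malloc()/free().
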
freedreno/freedreno_bo_cache.c

@@ -33,7 +33,6 @@
 #include "freedreno_drmif.h"
 #include "freedreno_priv.h"
 
-
 drm_private void bo_del(struct fd_bo *bo);
 drm_private extern pthread_mutex_t table_lock;
 
@@ -102,6 +101,7 @@ fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
 			if (time && ((time - bo->free_time) <= 1))
 				break;
 
+			VG_BO_OBTAIN(bo);
 			list_del(&bo->list);
 			bo_del(bo);
 		}
@@ -177,6 +177,7 @@ retry:
 		*size = bucket->size;
 		bo = find_in_bucket(bucket, flags);
 		if (bo) {
+			VG_BO_OBTAIN(bo);
 			if (bo->funcs->madvise(bo, TRUE) <= 0) {
 				/* we've lost the backing pages, delete and try again: */
 				pthread_mutex_lock(&table_lock);
@@ -207,6 +208,7 @@ fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
 		clock_gettime(CLOCK_MONOTONIC, &time);
 
 		bo->free_time = time.tv_sec;
+		VG_BO_RELEASE(bo);
 		list_addtail(&bo->list, &bucket->list);
 		fd_bo_cache_cleanup(cache, time.tv_sec);
 

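VG_BO_RELEASE() marks a bo off-limits while it sits in the bucket cache, and VG_BO_OBTAIN() revives it on reuse (or just before bo_del() during cleanup, so the real free is not flagged). A condensed sketch of that pairing on a toy free-list, with hypothetical cache_push/cache_pop helpers and struct obj standing in for fd_bo:

#include <valgrind/memcheck.h>

struct obj {
	struct obj *next;	/* embedded link, like the list_head in fd_bo */
	int payload;
};

static struct obj *cache_head;

static void cache_push(struct obj *o)		/* cf. VG_BO_RELEASE */
{
	o->next = cache_head;
	cache_head = o;
	if (RUNNING_ON_VALGRIND) {
		/* address-error reporting is disabled on the range so code
		 * that walks the list while entries sit poisoned in the
		 * cache (as the bucket scans above do) stays silent: */
		VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(o, sizeof(*o));
		VALGRIND_MAKE_MEM_NOACCESS(o, sizeof(*o));
	}
}

static struct obj *cache_pop(void)		/* cf. VG_BO_OBTAIN */
{
	struct obj *o = cache_head;
	if (o) {
		if (RUNNING_ON_VALGRIND) {
			VALGRIND_MAKE_MEM_DEFINED(o, sizeof(*o));
			VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(o, sizeof(*o));
		}
		cache_head = o->next;	/* unpoisoned first, then read */
	}
	return o;
}

int main(void)
{
	static struct obj o;
	cache_push(&o);		/* any touch of o.payload here would be flagged */
	return cache_pop() == &o ? 0 : 1;
}

This is why the cache hooks exist at all: without them, memcheck would attribute a recycled bo's backing memory to whichever caller first allocated it, making leak reports misleading.
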
freedreno/freedreno_priv.h

@@ -102,6 +102,9 @@ struct fd_device {
 	struct fd_bo_cache bo_cache;
 
 	int closefd;		/* call close(fd) upon destruction */
+
+	/* just for valgrind: */
+	int bo_size;
 };
 
 drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
@@ -196,4 +199,57 @@ offset_bytes(void *end, void *start)
 	return ((char *)end) - ((char *)start);
 }
 
+#ifdef HAVE_VALGRIND
+# include <memcheck.h>
+
+/*
+ * For tracking the backing memory (if valgrind enabled, we force a mmap
+ * for the purposes of tracking)
+ */
+static inline void VG_BO_ALLOC(struct fd_bo *bo)
+{
+	if (bo && RUNNING_ON_VALGRIND) {
+		VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
+	}
+}
+
+static inline void VG_BO_FREE(struct fd_bo *bo)
+{
+	VALGRIND_FREELIKE_BLOCK(bo->map, 0);
+}
+
+/*
+ * For tracking bo structs that are in the buffer-cache, so that valgrind
+ * doesn't attribute ownership to the first one to allocate the recycled
+ * bo.
+ *
+ * Note that the list_head in fd_bo is used to track the buffers in cache
+ * so disable error reporting on the range while they are in cache so
+ * valgrind doesn't squawk about list traversal.
+ *
+ */
+static inline void VG_BO_RELEASE(struct fd_bo *bo)
+{
+	if (RUNNING_ON_VALGRIND) {
+		VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
+		VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
+		VALGRIND_FREELIKE_BLOCK(bo->map, 0);
+	}
+}
+static inline void VG_BO_OBTAIN(struct fd_bo *bo)
+{
+	if (RUNNING_ON_VALGRIND) {
+		VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
+		VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
+		VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
+	}
+}
+#else
+static inline void VG_BO_ALLOC(struct fd_bo *bo) {}
+static inline void VG_BO_FREE(struct fd_bo *bo) {}
+static inline void VG_BO_RELEASE(struct fd_bo *bo) {}
+static inline void VG_BO_OBTAIN(struct fd_bo *bo) {}
+#endif
+
+
 #endif /* FREEDRENO_PRIV_H_ */

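Two properties make these hooks cheap to leave enabled: with HAVE_VALGRIND undefined the VG_BO_* helpers are empty inlines the compiler drops, and even in a valgrind-enabled build every client request reduces to a short magic instruction sequence that costs almost nothing unless the process is actually running under valgrind. A trivial probe, assuming the valgrind headers are installed under the usual valgrind/ include prefix:

#include <stdio.h>
#include <valgrind/valgrind.h>	/* memcheck.h pulls this in as well */

int main(void)
{
	/* evaluates to 0 in a native run, non-zero under valgrind */
	printf("under valgrind: %s\n", RUNNING_ON_VALGRIND ? "yes" : "no");
	return 0;
}
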
freedreno/kgsl/kgsl_device.c

@@ -61,5 +61,7 @@ drm_private struct fd_device * kgsl_device_new(int fd)
 	dev = &kgsl_dev->base;
 	dev->funcs = &funcs;
 
+	dev->bo_size = sizeof(struct kgsl_bo);
+
 	return dev;
 }

freedreno/msm/msm_device.c

@@ -64,5 +64,7 @@ drm_private struct fd_device * msm_device_new(int fd)
 
 	fd_bo_cache_init(&msm_dev->ring_cache, TRUE);
 
+	dev->bo_size = sizeof(struct msm_bo);
+
 	return dev;
 }

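The per-device bo_size exists because the generic struct fd_bo sits at the head of a larger backend-specific allocation (kgsl_bo or msm_bo), and VG_BO_RELEASE()/VG_BO_OBTAIN() must poison and revive the whole struct, not just the common header. A schematic of that embedding, with made-up stand-in names:

#include <stdio.h>

struct fd_bo_like { int handle; };	/* stand-in for the generic fd_bo */

struct backend_bo {			/* stand-in for kgsl_bo / msm_bo */
	struct fd_bo_like base;		/* generic part comes first */
	int backend_private;		/* tail the VG_BO_* helpers must also cover */
};

int main(void)
{
	/* what each *_device_new() above records in dev->bo_size: */
	printf("poison %zu bytes, not %zu\n",
	       sizeof(struct backend_bo), sizeof(struct fd_bo_like));
	return 0;
}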