Add /proc filesystem buffer / fence object accounting.
Check for NULL pointer in the i915 flush handler. Remove i915_sync_flush declaration.

branch: main
parent: a02155a0d9
commit: 273eb7833d
@@ -789,6 +789,7 @@ typedef struct drm_fence_manager{
 	int pending_exe_flush;
 	uint32_t last_exe_flush;
 	uint32_t exe_flush_sequence;
+	atomic_t count;
 } drm_fence_manager_t;
 
 typedef struct drm_buffer_manager{
@@ -809,6 +810,7 @@ typedef struct drm_buffer_manager{
 	uint32_t fence_type;
 	unsigned long max_pages;
 	unsigned long cur_pages;
+	atomic_t count;
 } drm_buffer_manager_t;
 
 
@@ -966,11 +968,11 @@ typedef struct drm_fence_object{
 	struct list_head ring;
 	int class;
 	uint32_t native_type;
-	volatile uint32_t type;
-	volatile uint32_t signaled;
+	uint32_t type;
+	uint32_t signaled;
 	uint32_t sequence;
-	volatile uint32_t flush_mask;
-	volatile uint32_t submitted_flush;
+	uint32_t flush_mask;
+	uint32_t submitted_flush;
 } drm_fence_object_t;
 
 
@@ -1380,13 +1382,16 @@ extern void drm_fence_manager_init(drm_device_t *dev);
 extern void drm_fence_manager_takedown(drm_device_t *dev);
 extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
 extern int drm_fence_object_flush(drm_device_t * dev,
-				  drm_fence_object_t * fence, uint32_t type);
-extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
+				  volatile drm_fence_object_t * fence,
+				  uint32_t type);
+extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+				     uint32_t type);
 extern void drm_fence_usage_deref_locked(drm_device_t * dev,
 					 drm_fence_object_t * fence);
 extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
 					   drm_fence_object_t * fence);
-extern int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
+extern int drm_fence_object_wait(drm_device_t * dev,
+				 volatile drm_fence_object_t * fence,
 				 int lazy, int ignore_signals, uint32_t mask);
 extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
 				   uint32_t fence_flags,
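The header changes above trade per-member volatile qualifiers (type, signaled, flush_mask, submitted_flush) for volatile-qualified fence object pointers in the prototypes. A minimal sketch of what the qualifier buys; the names (fence_example, example_is_signaled) are hypothetical and not from the patch:

/*
 * Through a volatile-qualified pointer every member access is a
 * volatile access: the compiler must re-read the field from memory
 * on each use instead of caching it in a register, which matters
 * when an interrupt handler updates the field concurrently.
 */
struct fence_example {
	uint32_t signaled;	/* updated from IRQ context */
};

static int example_is_signaled(volatile struct fence_example *fence,
			       uint32_t type)
{
	return (fence->signaled & type) == type;	/* fresh read each call */
}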
@@ -128,6 +128,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
 	if (bo->ttm_object) {
 		drm_ttm_object_deref_locked(dev, bo->ttm_object);
 	}
+	atomic_dec(&bm->count);
 	drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
 }
 
@@ -140,6 +141,10 @@ static void drm_bo_delayed_delete(drm_device_t * dev)
 
 	mutex_lock(&dev->struct_mutex);
 
+	/*
+	 * FIXME: Lock buffer object mutex.
+	 */
+
 	list_for_each_entry_safe(entry, next, &bm->ddestroy, ddestroy) {
 		fence = entry->fence;
 
@@ -1207,7 +1212,7 @@ int drm_buffer_object_create(drm_file_t * priv,
 	uint32_t new_flags;
 	unsigned long num_pages;
 
-	drm_bo_delayed_delete(dev);
+	//	drm_bo_delayed_delete(dev);
 
 	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
 		DRM_ERROR("Invalid buffer object start.\n");
@@ -1259,6 +1264,7 @@ int drm_buffer_object_create(drm_file_t * priv,
 
 	mutex_unlock(&bo->mutex);
 	*buf_obj = bo;
+	atomic_inc(&bm->count);
 	return 0;
 
       out_err:
@@ -1576,6 +1582,7 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
 
 		INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
 		bm->initialized = 1;
+		atomic_set(&bm->count, 0);
 		bm->cur_pages = 0;
 		bm->max_pages = arg.req.max_locked_pages;
 		break;
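The buffer-object accounting added in this file follows a simple pairing: atomic_set() when the manager is initialized, atomic_inc() on successful creation, atomic_dec() on destruction, and a lock-free atomic_read() from the /proc handler. A condensed sketch of that life cycle with hypothetical names, for illustration only:

static atomic_t example_count;

static void example_manager_init(void)
{
	atomic_set(&example_count, 0);		/* as in drm_mm_init_ioctl() */
}

static void example_object_create(void)
{
	atomic_inc(&example_count);		/* as in drm_buffer_object_create() */
}

static void example_object_destroy(void)
{
	atomic_dec(&example_count);		/* as in drm_bo_destroy_locked() */
}

static int example_active_objects(void)
{
	return atomic_read(&example_count);	/* read by /proc/dri/.../objects */
}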
@@ -111,10 +111,13 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
 void drm_fence_usage_deref_locked(drm_device_t * dev,
 				  drm_fence_object_t * fence)
 {
+	drm_fence_manager_t *fm = &dev->fm;
+
 	if (atomic_dec_and_test(&fence->usage)) {
 		drm_fence_unring(dev, &fence->ring);
 		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
 			  fence->base.hash.key);
+		atomic_dec(&fm->count);
 		kmem_cache_free(drm_cache.fence_object, fence);
 	}
 }
@@ -122,10 +125,13 @@ void drm_fence_usage_deref_locked(drm_device_t * dev,
 void drm_fence_usage_deref_unlocked(drm_device_t * dev,
 				    drm_fence_object_t * fence)
 {
+	drm_fence_manager_t *fm = &dev->fm;
+
 	if (atomic_dec_and_test(&fence->usage)) {
 		mutex_lock(&dev->struct_mutex);
 		if (atomic_read(&fence->usage) == 0) {
 			drm_fence_unring(dev, &fence->ring);
+			atomic_dec(&fm->count);
 			kmem_cache_free(drm_cache.fence_object, fence);
 		}
 		mutex_unlock(&dev->struct_mutex);
@@ -142,7 +148,8 @@ static void drm_fence_object_destroy(drm_file_t * priv,
 	drm_fence_usage_deref_locked(dev, fence);
 }
 
-static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
+static int fence_signaled(drm_device_t * dev, volatile
+			  drm_fence_object_t * fence,
 			  uint32_t mask, int poke_flush)
 {
 	unsigned long flags;
@@ -166,7 +173,7 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm,
 	uint32_t diff;
 
 	if (!fm->pending_exe_flush) {
-		struct list_head *list;
+		volatile struct list_head *list;
 
 		/*
 		 * Last_exe_flush is invalid. Find oldest sequence.
@@ -196,18 +203,15 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm,
 	}
 }
 
-int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type)
+int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+			      uint32_t type)
 {
 	return ((fence->signaled & type) == type);
 }
 
-/*
- * Make sure old fence objects are signaled before their fence sequences are
- * wrapped around and reused.
- */
-
 int drm_fence_object_flush(drm_device_t * dev,
-			   drm_fence_object_t * fence, uint32_t type)
+			   volatile drm_fence_object_t * fence,
+			   uint32_t type)
 {
 	drm_fence_manager_t *fm = &dev->fm;
 	drm_fence_driver_t *driver = dev->driver->fence_driver;
@@ -237,6 +241,12 @@ int drm_fence_object_flush(drm_device_t * dev,
 	return 0;
 }
 
+/*
+ * Make sure old fence objects are signaled before their fence sequences are
+ * wrapped around and reused.
+ */
+
+
 void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
 {
 	drm_fence_manager_t *fm = &dev->fm;
@@ -267,7 +277,8 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
 
 EXPORT_SYMBOL(drm_fence_flush_old);
 
-int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
+int drm_fence_object_wait(drm_device_t * dev,
+			  volatile drm_fence_object_t * fence,
 			  int lazy, int ignore_signals, uint32_t mask)
 {
 	drm_fence_manager_t *fm = &dev->fm;
@@ -426,6 +437,8 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
 {
 	drm_fence_object_t *fence;
 	int ret;
+	drm_fence_manager_t *fm = &dev->fm;
+	unsigned long fl;
 
 	fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
 	if (!fence)
@@ -436,6 +449,8 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
 		return ret;
 	}
 	*c_fence = fence;
+	atomic_inc(&fm->count);
+
 	return 0;
 }
 
@@ -448,16 +463,19 @@ void drm_fence_manager_init(drm_device_t * dev)
 	int i;
 
 	fm->lock = RW_LOCK_UNLOCKED;
+	write_lock(&fm->lock);
 	INIT_LIST_HEAD(&fm->ring);
 	fm->pending_flush = 0;
 	DRM_INIT_WAITQUEUE(&fm->fence_queue);
 	fm->initialized = 0;
 	if (fed) {
 		fm->initialized = 1;
+		atomic_set(&fm->count,0);
 		for (i = 0; i < fed->no_types; ++i) {
 			fm->fence_types[i] = &fm->ring;
 		}
 	}
+	write_unlock(&fm->lock);
 }
 
 void drm_fence_manager_takedown(drm_device_t * dev)
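Note the idiom in drm_fence_usage_deref_unlocked() above: the usage count is dropped outside the lock, and only when it hits zero does the function take struct_mutex and re-read the count before freeing, since another thread may have looked the fence up and re-referenced it between the decrement and the lock. A condensed sketch of the pattern, assuming a usage-counted object (names hypothetical):

static void example_deref_unlocked(drm_device_t *dev,
				   struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->usage)) {
		mutex_lock(&dev->struct_mutex);
		/* Re-check under the mutex: the count may have been
		 * raised again after our decrement reached zero. */
		if (atomic_read(&obj->usage) == 0)
			example_free_locked(dev, obj);
		mutex_unlock(&dev->struct_mutex);
	}
}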
@@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
 			   int request, int *eof, void *data);
 static int drm_bufs_info(char *buf, char **start, off_t offset,
 			 int request, int *eof, void *data);
+static int drm_objects_info(char *buf, char **start, off_t offset,
+			    int request, int *eof, void *data);
 #if DRM_DEBUG_CODE
 static int drm_vma_info(char *buf, char **start, off_t offset,
 			int request, int *eof, void *data);
@@ -67,6 +69,7 @@ static struct drm_proc_list {
 	{"clients", drm_clients_info},
 	{"queues", drm_queues_info},
 	{"bufs", drm_bufs_info},
+	{"objects", drm_objects_info},
 #if DRM_DEBUG_CODE
 	{"vma", drm_vma_info},
 #endif
@@ -418,6 +421,74 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
 	return ret;
 }
 
+/**
+ * Called when "/proc/dri/.../objects" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__objects_info(char *buf, char **start, off_t offset, int request,
+			     int *eof, void *data)
+{
+	drm_device_t *dev = (drm_device_t *) data;
+	int len = 0;
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_fence_manager_t *fm = &dev->fm;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	if (fm->initialized) {
+		DRM_PROC_PRINT("Number of active fence objects: %d.\n\n",
+			       atomic_read(&fm->count));
+	} else {
+		DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n");
+	}
+
+	if (bm->initialized) {
+		DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
+			       atomic_read(&bm->count));
+		DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
+		DRM_PROC_PRINT("Max allowed number of locked GATT pages %lu\n",
+			       bm->max_pages);
+	} else {
+		DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
+	}
+
+
+	DRM_PROC_PRINT("\n");
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+/**
+ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_objects_info(char *buf, char **start, off_t offset, int request,
+			    int *eof, void *data)
+{
+	drm_device_t *dev = (drm_device_t *) data;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm__objects_info(buf, start, offset, request, eof, data);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
 /**
  * Called when "/proc/dri/.../clients" is read.
  *
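drm__objects_info() above leans on the DRM_PROC_PRINT macro shared by the other handlers in this file, which appends to buf at len and bails out once DRM_PROC_LIMIT is exceeded; that is why len, buf, offset, and *eof must exist in the calling scope. Roughly, reconstructed from its use here rather than copied from the patch:

#define DRM_PROC_PRINT(fmt, arg...)					\
	len += sprintf(&buf[len], fmt , ##arg);				\
	if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }

The trailing arithmetic in the handler (return request when len > request + offset, otherwise len - offset) implements the old procfs read_proc contract: hand back at most the bytes the caller asked for, and set *eof once everything has been emitted.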
@@ -49,6 +49,9 @@ static void i915_perform_flush(drm_device_t * dev)
 	uint32_t diff;
 	uint32_t sequence;
 
+	if (!dev_priv)
+		return;
+
 	if (fm->pending_exe_flush) {
 		sequence = READ_BREADCRUMB(dev_priv);
 
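The added guard turns the flush handler into a harmless no-op while dev->dev_private is still NULL (or already torn down), since everything after it, starting with READ_BREADCRUMB(dev_priv), dereferences the private structure. The shape of the fix, reduced to a sketch with a hypothetical function name:

static void example_perform_flush(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv =
	    (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)		/* device not initialized: nothing to flush */
		return;

	/* ... safe to read breadcrumbs and walk the fence ring ... */
}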
@@ -164,7 +164,6 @@ extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags,
 				    uint32_t *sequence,
 				    uint32_t *native_type);
 extern void i915_poke_flush(drm_device_t *dev);
-extern void i915_sync_flush(drm_device_t *dev);
 #endif
 
 #ifdef I915_HAVE_BUFFER