Fence object reference / dereference cleanup.
Buffer object dereference cleanup.
Add a struct drm_device member to fence objects: This can simplify code, particularly in drivers.
parent e26ec51146
commit 00f1a66f22
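The diff below converts the fence and buffer-object dereference helpers from taking a (dev, object) pair to taking a pointer-to-pointer, so the helper itself clears the caller's reference, and it reads the device from the new fence->dev member. A minimal sketch of the calling-convention change from a driver's point of view; the example_put_fence() wrapper is hypothetical and not part of this commit:

/* Hypothetical driver-side helper, for illustration only. */
static void example_put_fence(drm_buffer_object_t *bo)
{
	/* Before this commit the caller passed the device and had to
	 * clear the pointer by hand:
	 *
	 *	drm_fence_usage_deref_unlocked(bo->dev, bo->fence);
	 *	bo->fence = NULL;
	 */

	/* After this commit the device comes from fence->dev and the
	 * deref helper sets the reference to NULL before dropping it. */
	if (bo->fence)
		drm_fence_usage_deref_unlocked(&bo->fence);
}
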
@@ -269,31 +269,25 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
 		int no_wait)
 {
-
-	drm_fence_object_t *fence = bo->fence;
 	int ret;
 
 	DRM_ASSERT_LOCKED(&bo->mutex);
 
-	if (fence) {
-		drm_device_t *dev = bo->dev;
-		if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
-			drm_fence_usage_deref_unlocked(dev, fence);
-			bo->fence = NULL;
+	if (bo->fence) {
+		if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
+			drm_fence_usage_deref_unlocked(&bo->fence);
 			return 0;
 		}
 		if (no_wait) {
 			return -EBUSY;
 		}
 		ret =
-		    drm_fence_object_wait(dev, fence, lazy, ignore_signals,
+		    drm_fence_object_wait(bo->fence, lazy, ignore_signals,
 					  bo->fence_type);
 		if (ret)
 			return ret;
 
-		drm_fence_usage_deref_unlocked(dev, fence);
-		bo->fence = NULL;
+		drm_fence_usage_deref_unlocked(&bo->fence);
 	}
 	return 0;
 }
@@ -321,10 +315,8 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
 				  "Evicting buffer.\n");
 			}
 		}
-		if (bo->fence) {
-			drm_fence_usage_deref_unlocked(dev, bo->fence);
-			bo->fence = NULL;
-		}
+		if (bo->fence)
+			drm_fence_usage_deref_unlocked(&bo->fence);
 	}
 	return 0;
 }
@@ -348,11 +340,9 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
 
 	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
 
-	if (bo->fence && drm_fence_object_signaled(dev, bo->fence,
-						   bo->fence_type, 0)) {
-		drm_fence_usage_deref_unlocked(dev, bo->fence);
-		bo->fence = NULL;
-	}
+	if (bo->fence && drm_fence_object_signaled(bo->fence,
+						   bo->fence_type, 0))
+		drm_fence_usage_deref_unlocked(&bo->fence);
 
 	if (bo->fence && remove_all)
 		(void)drm_bo_expire_fence(bo, 0);
@@ -383,7 +373,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
 	}
 
 	if (list_empty(&bo->ddestroy)) {
-		drm_fence_object_flush(dev, bo->fence, bo->fence_type);
+		drm_fence_object_flush(bo->fence, bo->fence_type);
 		list_add_tail(&bo->ddestroy, &bm->ddestroy);
 		schedule_delayed_work(&bm->wq,
 				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
@@ -503,12 +493,15 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
+void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo)
 {
-	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+	struct drm_buffer_object *tmp_bo = *bo;
+	bo = NULL;
 
-	if (atomic_dec_and_test(&bo->usage)) {
-		drm_bo_destroy_locked(bo);
+	DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
+
+	if (atomic_dec_and_test(&tmp_bo->usage)) {
+		drm_bo_destroy_locked(tmp_bo);
 	}
 }
 
@@ -520,17 +513,19 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
 	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
 
 	drm_bo_takedown_vm_locked(bo);
-	drm_bo_usage_deref_locked(bo);
+	drm_bo_usage_deref_locked(&bo);
 }
 
-static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
+static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo)
 {
-	drm_device_t *dev = bo->dev;
+	struct drm_buffer_object *tmp_bo = *bo;
+	drm_device_t *dev = tmp_bo->dev;
 
-	if (atomic_dec_and_test(&bo->usage)) {
+	*bo = NULL;
+	if (atomic_dec_and_test(&tmp_bo->usage)) {
 		mutex_lock(&dev->struct_mutex);
-		if (atomic_read(&bo->usage) == 0)
-			drm_bo_destroy_locked(bo);
+		if (atomic_read(&tmp_bo->usage) == 0)
+			drm_bo_destroy_locked(tmp_bo);
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
@@ -616,16 +611,15 @@ int drm_fence_buffer_objects(drm_file_t * priv,
 		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
 			count++;
 			if (entry->fence)
-				drm_fence_usage_deref_locked(dev, entry->fence);
-			entry->fence = fence;
-			atomic_inc(&fence->usage);
+				drm_fence_usage_deref_locked(&entry->fence);
+			entry->fence = drm_fence_reference_locked(fence);
 			DRM_FLAG_MASKED(entry->priv_flags, 0,
 					_DRM_BO_FLAG_UNFENCED);
 			DRM_WAKEUP(&entry->event_queue);
 			drm_bo_add_to_lru(entry);
 		}
 		mutex_unlock(&entry->mutex);
-		drm_bo_usage_deref_locked(entry);
+		drm_bo_usage_deref_locked(&entry);
 		l = f_list.next;
 	}
 	DRM_DEBUG("Fenced %d buffers\n", count);
@@ -742,7 +736,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev,
 
 		ret = drm_bo_evict(entry, mem_type, no_wait);
 		mutex_unlock(&entry->mutex);
-		drm_bo_usage_deref_unlocked(entry);
+		drm_bo_usage_deref_unlocked(&entry);
 		if (ret)
 			return ret;
 		mutex_lock(&dev->struct_mutex);
@@ -962,10 +956,8 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo)
 
 	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
 	if (fence) {
-		drm_device_t *dev = bo->dev;
-		if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
-			drm_fence_usage_deref_unlocked(dev, fence);
-			bo->fence = NULL;
+		if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+			drm_fence_usage_deref_unlocked(&bo->fence);
 			return 0;
 		}
 		return 1;
@@ -984,16 +976,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo)
 
 	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
 	if (fence) {
-		drm_device_t *dev = bo->dev;
-		if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
-			drm_fence_usage_deref_unlocked(dev, fence);
-			bo->fence = NULL;
+		if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+			drm_fence_usage_deref_unlocked(&bo->fence);
 			return 0;
 		}
-		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
-		if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
-			drm_fence_usage_deref_unlocked(dev, fence);
-			bo->fence = NULL;
+		drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
+		if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+			drm_fence_usage_deref_unlocked(&bo->fence);
 			return 0;
 		}
 		return 1;
@@ -1190,7 +1179,7 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
 		drm_bo_fill_rep_arg(bo, rep);
 out:
 	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(bo);
+	drm_bo_usage_deref_unlocked(&bo);
 	return ret;
 }
 
@@ -1216,7 +1205,7 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
 	}
 
 	drm_remove_ref_object(priv, ro);
-	drm_bo_usage_deref_locked(bo);
+	drm_bo_usage_deref_locked(&bo);
 out:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1512,7 +1501,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
 
 	mutex_unlock(&bo->mutex);
 
-	drm_bo_usage_deref_unlocked(bo);
+	drm_bo_usage_deref_unlocked(&bo);
 	return ret;
 }
 
@@ -1534,7 +1523,7 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
 	(void)drm_bo_busy(bo);
 	drm_bo_fill_rep_arg(bo, rep);
 	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(bo);
+	drm_bo_usage_deref_unlocked(&bo);
 	return 0;
 }
 
@@ -1566,7 +1555,7 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
 
 out:
 	mutex_unlock(&bo->mutex);
-	drm_bo_usage_deref_unlocked(bo);
+	drm_bo_usage_deref_unlocked(&bo);
 	return ret;
 }
 
@@ -1651,7 +1640,7 @@ int drm_buffer_object_create(drm_device_t *dev,
 out_err:
 	mutex_unlock(&bo->mutex);
 
-	drm_bo_usage_deref_unlocked(bo);
+	drm_bo_usage_deref_unlocked(&bo);
 	return ret;
 }
 
@@ -1728,7 +1717,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
 							   mask &
 							   DRM_BO_FLAG_SHAREABLE);
 		if (rep.ret)
-			drm_bo_usage_deref_unlocked(entry);
+			drm_bo_usage_deref_unlocked(&entry);
 
 		if (rep.ret)
 			break;
@@ -1957,7 +1946,7 @@ restart:
 					  allow_errors);
 		mutex_lock(&dev->struct_mutex);
 
-		drm_bo_usage_deref_locked(entry);
+		drm_bo_usage_deref_locked(&entry);
 		if (ret)
 			return ret;
 
@@ -1967,10 +1956,8 @@ restart:
 
 		do_restart = ((next->prev != list) && (next->prev != prev));
 
-		if (nentry != NULL && do_restart) {
-			drm_bo_usage_deref_locked(nentry);
-			nentry = NULL;
-		}
+		if (nentry != NULL && do_restart)
+			drm_bo_usage_deref_locked(&nentry);
 
 		if (do_restart)
 			goto restart;
@@ -2365,7 +2352,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
 	drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
 	list->map = NULL;
 	list->user_token = 0ULL;
-	drm_bo_usage_deref_locked(bo);
+	drm_bo_usage_deref_locked(&bo);
 }
 
 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)

@@ -306,7 +306,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo,
 	INIT_LIST_HEAD(&fbo->p_mm_list);
 #endif
 
-	atomic_inc(&bo->fence->usage);
+	drm_fence_reference_unlocked(&fbo->fence, bo->fence);
 	fbo->pinned_node = NULL;
 	fbo->mem.mm_node->private = (void *)fbo;
 	atomic_set(&fbo->usage, 1);
@@ -339,7 +339,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
 	drm_buffer_object_t *old_obj;
 
 	if (bo->fence)
-		drm_fence_usage_deref_unlocked(dev, bo->fence);
+		drm_fence_usage_deref_unlocked(&bo->fence);
 	ret = drm_fence_object_create(dev, fence_class, fence_type,
 				      fence_flags | DRM_FENCE_FLAG_EMIT,
 				      &bo->fence);
@@ -396,7 +396,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
 		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
 		drm_bo_add_to_lru(old_obj);
 
-		drm_bo_usage_deref_locked(old_obj);
+		drm_bo_usage_deref_locked(&old_obj);
 		mutex_unlock(&dev->struct_mutex);
 
 	}

@@ -124,56 +124,76 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
 	write_unlock_irqrestore(&fm->lock, flags);
 }
 
-void drm_fence_usage_deref_locked(drm_device_t * dev,
-				  drm_fence_object_t * fence)
+void drm_fence_usage_deref_locked(drm_fence_object_t ** fence)
 {
+	struct drm_fence_object *tmp_fence = *fence;
+	struct drm_device *dev = tmp_fence->dev;
 	drm_fence_manager_t *fm = &dev->fm;
 
 	DRM_ASSERT_LOCKED(&dev->struct_mutex);
 
-	if (atomic_dec_and_test(&fence->usage)) {
-		drm_fence_unring(dev, &fence->ring);
+	*fence = NULL;
+	if (atomic_dec_and_test(&tmp_fence->usage)) {
+		drm_fence_unring(dev, &tmp_fence->ring);
 		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
-			  fence->base.hash.key);
+			  tmp_fence->base.hash.key);
 		atomic_dec(&fm->count);
-		BUG_ON(!list_empty(&fence->base.list));
-		drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+		BUG_ON(!list_empty(&tmp_fence->base.list));
+		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 	}
 }
 
-void drm_fence_usage_deref_unlocked(drm_device_t * dev,
-				    drm_fence_object_t * fence)
+void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence)
 {
+	struct drm_fence_object *tmp_fence = *fence;
+	struct drm_device *dev = tmp_fence->dev;
 	drm_fence_manager_t *fm = &dev->fm;
 
-	if (atomic_dec_and_test(&fence->usage)) {
+	*fence = NULL;
+	if (atomic_dec_and_test(&tmp_fence->usage)) {
 		mutex_lock(&dev->struct_mutex);
-		if (atomic_read(&fence->usage) == 0) {
-			drm_fence_unring(dev, &fence->ring);
+		if (atomic_read(&tmp_fence->usage) == 0) {
+			drm_fence_unring(dev, &tmp_fence->ring);
 			atomic_dec(&fm->count);
-			BUG_ON(!list_empty(&fence->base.list));
-			drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+			BUG_ON(!list_empty(&tmp_fence->base.list));
+			drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 		}
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
 
-static void drm_fence_object_destroy(drm_file_t * priv,
-				     drm_user_object_t * base)
+struct drm_fence_object
+*drm_fence_reference_locked(struct drm_fence_object *src)
+{
+	DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
+
+	atomic_inc(&src->usage);
+	return src;
+}
+
+void drm_fence_reference_unlocked(struct drm_fence_object **dst,
+				  struct drm_fence_object *src)
+{
+	mutex_lock(&src->dev->struct_mutex);
+	*dst = src;
+	atomic_inc(&src->usage);
+	mutex_unlock(&src->dev->struct_mutex);
+}
+
+
+static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base)
 {
 	drm_device_t *dev = priv->head->dev;
 	drm_fence_object_t *fence =
 	    drm_user_object_entry(base, drm_fence_object_t, base);
 
-	drm_fence_usage_deref_locked(dev, fence);
+	drm_fence_usage_deref_locked(&fence);
 }
 
-int drm_fence_object_signaled(drm_device_t * dev,
-			      drm_fence_object_t * fence,
+int drm_fence_object_signaled(drm_fence_object_t * fence,
 			      uint32_t mask, int poke_flush)
 {
 	unsigned long flags;
 	int signaled;
+	struct drm_device *dev = fence->dev;
 	drm_fence_manager_t *fm = &dev->fm;
 	drm_fence_driver_t *driver = dev->driver->fence_driver;
 
@@ -204,10 +224,10 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
 	}
 }
 
-int drm_fence_object_flush(drm_device_t * dev,
-			   drm_fence_object_t * fence,
+int drm_fence_object_flush(drm_fence_object_t * fence,
 			   uint32_t type)
 {
+	struct drm_device *dev = fence->dev;
 	drm_fence_manager_t *fm = &dev->fm;
 	drm_fence_class_manager_t *fc = &fm->class[fence->class];
 	drm_fence_driver_t *driver = dev->driver->fence_driver;
@@ -270,24 +290,23 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
 		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
-	fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
-	atomic_inc(&fence->usage);
+	fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring));
 	mutex_unlock(&dev->struct_mutex);
 	diff = (old_sequence - fence->sequence) & driver->sequence_mask;
 	read_unlock_irqrestore(&fm->lock, flags);
 	if (diff < driver->wrap_diff) {
-		drm_fence_object_flush(dev, fence, fence->type);
+		drm_fence_object_flush(fence, fence->type);
 	}
-	drm_fence_usage_deref_unlocked(dev, fence);
+	drm_fence_usage_deref_unlocked(&fence);
 }
 
 EXPORT_SYMBOL(drm_fence_flush_old);
 
-static int drm_fence_lazy_wait(drm_device_t *dev,
-			       drm_fence_object_t *fence,
+static int drm_fence_lazy_wait(drm_fence_object_t *fence,
 			       int ignore_signals,
 			       uint32_t mask)
 {
+	struct drm_device *dev = fence->dev;
 	drm_fence_manager_t *fm = &dev->fm;
 	drm_fence_class_manager_t *fc = &fm->class[fence->class];
 	int signaled;
@@ -296,13 +315,13 @@ static int drm_fence_lazy_wait(drm_device_t *dev,
 
 	do {
 		DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
-			    (signaled = drm_fence_object_signaled(dev, fence, mask, 1)));
+			    (signaled = drm_fence_object_signaled(fence, mask, 1)));
 		if (signaled)
 			return 0;
 		if (time_after_eq(jiffies, _end))
 			break;
 	} while (ret == -EINTR && ignore_signals);
-	if (drm_fence_object_signaled(dev, fence, mask, 0))
+	if (drm_fence_object_signaled(fence, mask, 0))
 		return 0;
 	if (time_after_eq(jiffies, _end))
 		ret = -EBUSY;
@@ -317,10 +336,10 @@ static int drm_fence_lazy_wait(drm_device_t *dev,
 	return 0;
 }
 
-int drm_fence_object_wait(drm_device_t * dev,
-			  drm_fence_object_t * fence,
+int drm_fence_object_wait(drm_fence_object_t * fence,
 			  int lazy, int ignore_signals, uint32_t mask)
 {
+	struct drm_device *dev = fence->dev;
 	drm_fence_driver_t *driver = dev->driver->fence_driver;
 	int ret = 0;
 	unsigned long _end;
@@ -332,16 +351,16 @@ int drm_fence_object_wait(drm_device_t * dev,
 		return -EINVAL;
 	}
 
-	if (drm_fence_object_signaled(dev, fence, mask, 0))
+	if (drm_fence_object_signaled(fence, mask, 0))
 		return 0;
 
 	_end = jiffies + 3 * DRM_HZ;
 
-	drm_fence_object_flush(dev, fence, mask);
+	drm_fence_object_flush(fence, mask);
 
 	if (lazy && driver->lazy_capable) {
 
-		ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
+		ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
 		if (ret)
 			return ret;
 
@@ -349,7 +368,7 @@ int drm_fence_object_wait(drm_device_t * dev,
 
 		if (driver->has_irq(dev, fence->class,
 				    DRM_FENCE_TYPE_EXE)) {
-			ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
+			ret = drm_fence_lazy_wait(fence, ignore_signals,
 						  DRM_FENCE_TYPE_EXE);
 			if (ret)
 				return ret;
@@ -357,13 +376,13 @@ int drm_fence_object_wait(drm_device_t * dev,
 
 		if (driver->has_irq(dev, fence->class,
 				    mask & ~DRM_FENCE_TYPE_EXE)) {
-			ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
+			ret = drm_fence_lazy_wait(fence, ignore_signals,
 						  mask);
 			if (ret)
 				return ret;
 		}
 	}
-	if (drm_fence_object_signaled(dev, fence, mask, 0))
+	if (drm_fence_object_signaled(fence, mask, 0))
 		return 0;
 
 	/*
@@ -375,7 +394,7 @@ int drm_fence_object_wait(drm_device_t * dev,
 #endif
 	do {
 		schedule();
-		signaled = drm_fence_object_signaled(dev, fence, mask, 1);
+		signaled = drm_fence_object_signaled(fence, mask, 1);
 	} while (!signaled && !time_after_eq(jiffies, _end));
 
 	if (!signaled)
@@ -384,9 +403,10 @@ int drm_fence_object_wait(drm_device_t * dev,
 	return 0;
 }
 
-int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
+int drm_fence_object_emit(drm_fence_object_t * fence,
 			  uint32_t fence_flags, uint32_t class, uint32_t type)
 {
+	struct drm_device *dev = fence->dev;
 	drm_fence_manager_t *fm = &dev->fm;
 	drm_fence_driver_t *driver = dev->driver->fence_driver;
 	drm_fence_class_manager_t *fc = &fm->class[fence->class];
@@ -436,9 +456,10 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
 	fence->submitted_flush = 0;
 	fence->signaled = 0;
 	fence->sequence = 0;
+	fence->dev = dev;
 	write_unlock_irqrestore(&fm->lock, flags);
 	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
-		ret = drm_fence_object_emit(dev, fence, fence_flags,
+		ret = drm_fence_object_emit(fence, fence_flags,
 					    fence->class, type);
 	}
 	return ret;
@@ -476,7 +497,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
 		return -ENOMEM;
 	ret = drm_fence_object_init(dev, class, type, flags, fence);
 	if (ret) {
-		drm_fence_usage_deref_unlocked(dev, fence);
+		drm_fence_usage_deref_unlocked(&fence);
 		return ret;
 	}
 	*c_fence = fence;
@@ -533,8 +554,7 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
 		mutex_unlock(&dev->struct_mutex);
 		return NULL;
 	}
-	fence = drm_user_object_entry(uo, drm_fence_object_t, base);
-	atomic_inc(&fence->usage);
+	fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base));
 	mutex_unlock(&dev->struct_mutex);
 	return fence;
 }
@@ -568,7 +588,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
 						  arg.flags &
 						  DRM_FENCE_FLAG_SHAREABLE);
 		if (ret) {
-			drm_fence_usage_deref_unlocked(dev, fence);
+			drm_fence_usage_deref_unlocked(&fence);
 			return ret;
 		}
 		arg.handle = fence->base.hash.key;
@@ -603,14 +623,14 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
 		fence = drm_lookup_fence_object(priv, arg.handle);
 		if (!fence)
 			return -EINVAL;
-		ret = drm_fence_object_flush(dev, fence, arg.type);
+		ret = drm_fence_object_flush(fence, arg.type);
 		break;
 	case drm_fence_wait:
 		fence = drm_lookup_fence_object(priv, arg.handle);
 		if (!fence)
 			return -EINVAL;
 		ret =
-		    drm_fence_object_wait(dev, fence,
+		    drm_fence_object_wait(fence,
 					  arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
 					  0, arg.type);
 		break;
@@ -619,7 +639,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
 		fence = drm_lookup_fence_object(priv, arg.handle);
 		if (!fence)
 			return -EINVAL;
-		ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
+		ret = drm_fence_object_emit(fence, arg.flags, arg.class,
 					    arg.type);
 		break;
 	case drm_fence_buffers:
@@ -647,7 +667,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
 	arg.type = fence->type;
 	arg.signaled = fence->signaled;
 	read_unlock_irqrestore(&fm->lock, flags);
-	drm_fence_usage_deref_unlocked(dev, fence);
+	drm_fence_usage_deref_unlocked(&fence);
 
 	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
 	return ret;

@@ -141,6 +141,7 @@ extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
 
 typedef struct drm_fence_object {
 	drm_user_object_t base;
+	struct drm_device *dev;
 	atomic_t usage;
 
 	/*
@@ -196,17 +197,15 @@ extern void drm_fence_manager_init(struct drm_device *dev);
 extern void drm_fence_manager_takedown(struct drm_device *dev);
 extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
 				uint32_t sequence);
-extern int drm_fence_object_flush(struct drm_device *dev,
-				  drm_fence_object_t * fence, uint32_t type);
-extern int drm_fence_object_signaled(struct drm_device *dev,
-				     drm_fence_object_t * fence,
+extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type);
+extern int drm_fence_object_signaled(drm_fence_object_t * fence,
 				     uint32_t type, int flush);
-extern void drm_fence_usage_deref_locked(struct drm_device *dev,
-					 drm_fence_object_t * fence);
-extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
-					   drm_fence_object_t * fence);
-extern int drm_fence_object_wait(struct drm_device *dev,
-				 drm_fence_object_t * fence,
+extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence);
+extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence);
+extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
+extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
+					 struct drm_fence_object *src);
+extern int drm_fence_object_wait(drm_fence_object_t * fence,
 				 int lazy, int ignore_signals, uint32_t mask);
 extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
 				   uint32_t fence_flags, uint32_t class,
@@ -441,7 +440,7 @@ extern int drm_bo_pci_offset(struct drm_device *dev,
 				  unsigned long *bus_size);
 extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
 
-extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
+extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo);
 extern int drm_fence_buffer_objects(drm_file_t * priv,
 				    struct list_head *list,
 				    uint32_t fence_flags,

@@ -840,7 +840,8 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
 #ifdef DRM_ODD_MM_COMPAT
 		drm_bo_delete_vma(bo, vma);
 #endif
-		drm_bo_usage_deref_locked(bo);
+		drm_bo_usage_deref_locked((struct drm_buffer_object **)
+					  &vma->vm_private_data);
 		mutex_unlock(&dev->struct_mutex);
 	}
 	return;
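The drm_fence_reference_locked() / drm_fence_reference_unlocked() helpers added in the fence hunks above replace the open-coded atomic_inc(&fence->usage) pattern for taking a reference. A minimal sketch of the two usage patterns; the example_attach_fence_* wrappers are hypothetical and not part of this commit:

/* Hypothetical callers, for illustration only. */

/* Caller already holds dev->struct_mutex. */
static void example_attach_fence_locked(drm_buffer_object_t *bo,
					drm_fence_object_t *fence)
{
	/* Bumps the usage count and returns the same pointer. */
	bo->fence = drm_fence_reference_locked(fence);
}

/* Caller does not hold dev->struct_mutex; the helper takes it
 * internally around the assignment and usage-count increment. */
static void example_attach_fence_unlocked(drm_buffer_object_t *bo,
					  drm_fence_object_t *fence)
{
	drm_fence_reference_unlocked(&bo->fence, fence);
}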