[gem] Remove the drm_client_lock_take in set_domain.

We no longer need to use it to protect against shared ringbuffer access.
main
Eric Anholt 2008-06-11 16:19:23 -07:00
parent 57b4c4c32d
commit b2606e325a
3 changed files with 2 additions and 64 deletions

View File

@ -1169,9 +1169,6 @@ extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
extern void drm_idlelock_release(struct drm_lock_data *lock_data);
extern int drm_client_lock_take(struct drm_device *dev,
struct drm_file *file_priv);
extern void drm_client_lock_release(struct drm_device *dev);
/*
* These are exported to drivers so that they can implement fencing using

View File

@ -375,62 +375,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);
/**
* Takes the lock on behalf of the client if needed, using the kernel context.
*
* This allows us to hide the hardware lock when it's required for protection
* of data structures (such as command ringbuffer) shared with the X Server, and
* a way for us to transition to lockless for those requests when the X Server
* stops accessing the ringbuffer directly, without having to update the
* other userland clients.
*/
int drm_client_lock_take(struct drm_device *dev, struct drm_file *file_priv)
{
int ret;
unsigned long irqflags;
/* If the client has the lock, we're already done. */
if (drm_i_have_hw_lock(dev, file_priv))
return 0;
/* NOTE(review): caller evidently holds struct_mutex (it is dropped here
 * and re-taken before return); confirm against callers before changing. */
mutex_unlock (&dev->struct_mutex);
/* Client doesn't hold the lock. Block taking the lock with the kernel
 * context on behalf of the client, and return whether we were
 * successful.
 */
/* Bump user_waiters under the lock spinlock so the lock-release path
 * knows someone is sleeping on lock_queue. */
spin_lock_irqsave(&dev->lock.spinlock, irqflags);
dev->lock.user_waiters++;
spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
/* Sleep until drm_lock_take() succeeds with the kernel context; returns
 * non-zero if interrupted by a signal before the lock was acquired. */
ret = wait_event_interruptible(dev->lock.lock_queue,
drm_lock_take(&dev->lock,
DRM_KERNEL_CONTEXT));
spin_lock_irqsave(&dev->lock.spinlock, irqflags);
dev->lock.user_waiters--;
if (ret != 0) {
/* Interrupted: drop the waiter count and report the error. */
spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
} else {
/* Lock acquired with DRM_KERNEL_CONTEXT: record this client as the
 * holder and mark kernel_held so drm_client_lock_release() knows to
 * free it. */
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
dev->lock.kernel_held = 1;
file_priv->lock_count++;
spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
}
/* Re-take struct_mutex so the caller's locking state is unchanged. */
mutex_lock (&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_client_lock_take);
/**
 * Releases the hardware lock if drm_client_lock_take() acquired it on the
 * client's behalf with the kernel context; a no-op otherwise.
 */
void drm_client_lock_release(struct drm_device *dev)
{
	if (!dev->lock.kernel_held)
		return;

	/* Clear holder state before freeing the lock itself. */
	dev->lock.kernel_held = 0;
	dev->lock.file_priv = NULL;
	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
EXPORT_SYMBOL(drm_client_lock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{

View File

@ -1930,14 +1930,11 @@ i915_gem_set_domain(struct drm_gem_object *obj,
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
drm_client_lock_take(dev, file_priv);
ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
if (ret) {
drm_client_lock_release(dev);
if (ret)
return ret;
}
i915_gem_dev_set_domain(obj->dev);
drm_client_lock_release(dev);
return 0;
}