[intel-gem] Replace idlelock usage with real lock acquisition.

main
Eric Anholt 2008-05-27 17:50:39 -07:00
parent 1f4e36081b
commit e10502002f
5 changed files with 77 additions and 18 deletions

View File

@ -474,6 +474,11 @@ struct drm_lock_data {
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
/**
* Boolean signaling that the lock is held on behalf of the
* file_priv client by the kernel in an ioctl handler.
*/
int kernel_held;
};
/**
@ -778,6 +783,7 @@ struct drm_driver {
* Driver-specific callback to set memory domains from userspace
*/
int (*gem_set_domain) (struct drm_gem_object *obj,
+				       struct drm_file *file_priv,
uint32_t read_domains,
uint32_t write_domain);
@ -1178,6 +1184,9 @@ extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
extern void drm_idlelock_release(struct drm_lock_data *lock_data);
extern int drm_client_lock_take(struct drm_device *dev,
struct drm_file *file_priv);
extern void drm_client_lock_release(struct drm_device *dev);
/*
* These are exported to drivers so that they can implement fencing using

View File

@ -291,7 +291,7 @@ drm_gem_pread_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
if (dev->driver->gem_set_domain) {
-		ret = dev->driver->gem_set_domain(obj,
+		ret = dev->driver->gem_set_domain(obj, file_priv,
DRM_GEM_DOMAIN_CPU,
0);
if (ret) {
@ -384,7 +384,7 @@ drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
if (dev->driver->gem_set_domain) {
-		ret = dev->driver->gem_set_domain(obj,
+		ret = dev->driver->gem_set_domain(obj, file_priv,
DRM_GEM_DOMAIN_CPU,
DRM_GEM_DOMAIN_CPU);
if (ret) {
@ -530,7 +530,7 @@ drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
if (dev->driver->gem_set_domain) {
-		ret = dev->driver->gem_set_domain(obj,
+		ret = dev->driver->gem_set_domain(obj, file_priv,
args->read_domains,
args->write_domain);
} else {

View File

@ -215,22 +215,16 @@ int drm_lock_take(struct drm_lock_data *lock_data,
} while (prev != old);
spin_unlock_irqrestore(&lock_data->spinlock, irqflags);
-	if (_DRM_LOCKING_CONTEXT(old) == context) {
-		if (old & _DRM_LOCK_HELD) {
-			if (context != DRM_KERNEL_CONTEXT) {
-				DRM_ERROR("%d holds heavyweight lock\n",
-					  context);
-			}
-			return 0;
+	/* Warn on recursive locking of user contexts. */
+	if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) {
+		if (context != DRM_KERNEL_CONTEXT) {
+			DRM_ERROR("%d holds heavyweight lock\n",
+				  context);
 		}
+		return 0;
 	}
-	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
-		/* Have lock */
-		return 1;
-	}
-	return 0;
+	return !_DRM_LOCK_IS_HELD(old);
}
/**
@ -386,6 +380,60 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);
/**
 * Takes the lock on behalf of the client if needed, using the kernel context.
 *
 * This allows us to hide the hardware lock when it's required for protection
 * of data structures (such as command ringbuffer) shared with the X Server, and
 * a way for us to transition to lockless for those requests when the X Server
 * stops accessing the ringbuffer directly, without having to update the
 * other userland clients.
 *
 * \param dev DRM device whose heavyweight hardware lock is required.
 * \param file_priv client on whose behalf the lock is being taken.
 * \return 0 when the lock is held (either already by the client, or now by
 * the kernel context on the client's behalf), or the negative error from
 * wait_event_interruptible() (e.g. -ERESTARTSYS) if the wait was interrupted
 * by a signal before the lock could be acquired.
 */
int drm_client_lock_take(struct drm_device *dev, struct drm_file *file_priv)
{
int ret;
unsigned long irqflags;
/* If the client has the lock, we're already done. */
if (drm_i_have_hw_lock(dev, file_priv))
return 0;
/* Client doesn't hold the lock. Block taking the lock with the kernel
 * context on behalf of the client, and return whether we were
 * successful.
 */
spin_lock_irqsave(&dev->lock.spinlock, irqflags);
/* NOTE(review): counted as a user waiter even though the wait below
 * uses DRM_KERNEL_CONTEXT -- confirm this is the intended accounting. */
dev->lock.user_waiters++;
spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
/* drm_lock_take() is re-evaluated on each wakeup; the wait ends once it
 * reports that the kernel context owns the heavyweight lock. */
ret = wait_event_interruptible(dev->lock.lock_queue,
drm_lock_take(&dev->lock,
DRM_KERNEL_CONTEXT));
spin_lock_irqsave(&dev->lock.spinlock, irqflags);
dev->lock.user_waiters--;
if (ret != 0) {
/* Interrupted by a signal before the lock was acquired. */
spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
return ret;
} else {
/* Record ownership so drm_client_lock_release() knows the kernel
 * holds the lock on this client's behalf. */
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
dev->lock.kernel_held = 1;
file_priv->lock_count++;
spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
return 0;
}
}
EXPORT_SYMBOL(drm_client_lock_take);
void drm_client_lock_release(struct drm_device *dev)
{
if (dev->lock.kernel_held) {
dev->lock.kernel_held = 0;
dev->lock.file_priv = NULL;
drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
}
EXPORT_SYMBOL(drm_client_lock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{

View File

@ -1522,6 +1522,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
int
i915_gem_set_domain(struct drm_gem_object *obj,
+		     struct drm_file *file_priv,
uint32_t read_domains,
uint32_t write_domain)
{
@ -1529,11 +1530,11 @@ i915_gem_set_domain(struct drm_gem_object *obj,
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-	drm_idlelock_take (&dev->lock);
+	drm_client_lock_take(dev, file_priv);
i915_kernel_lost_context(dev);
i915_gem_object_set_domain(obj, read_domains, write_domain);
i915_gem_dev_set_domain(obj->dev);
-	drm_idlelock_release (&dev->lock);
+	drm_client_lock_release(dev);
return 0;
}

View File

@ -454,6 +454,7 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_set_domain(struct drm_gem_object *obj,
+			 struct drm_file *file_priv,
uint32_t read_domains,
uint32_t write_domain);
int i915_gem_flush_pwrite(struct drm_gem_object *obj,