commit 3e02f7fd31
Merge commit 'origin/drm-gem' into ms-gem

Conflicts:
    linux-core/drmP.h
    linux-core/drm_drv.c
    linux-core/drm_stub.c
    linux-core/i915_drv.c
    linux-core/i915_gem.c
    shared-core/i915_drv.h
    shared-core/i915_irq.c
@@ -109,11 +109,11 @@ struct _dri_bo_gem {
     int validate_index;
 
     /**
-     * Boolean whether set_domain to CPU is current
-     * Set when set_domain has been called
-     * Cleared when a batch has been submitted
+     * Boolean whether we've started swrast
+     * Set when the buffer has been mapped
+     * Cleared when the buffer is unmapped
      */
-    int cpu_domain_set;
+    int swrast;
 
     /** Array passed to the DRM containing relocation information. */
     struct drm_i915_gem_relocation_entry *relocs;
@@ -485,25 +485,27 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
         bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
     }
     bo->virtual = bo_gem->virtual;
+    bo_gem->swrast = 0;
     bo_gem->mapped = 1;
     DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
     }
 
-    if (!bo_gem->cpu_domain_set) {
+    if (!bo_gem->swrast) {
         set_domain.handle = bo_gem->gem_handle;
         set_domain.read_domains = I915_GEM_DOMAIN_CPU;
-        set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_CPU : 0;
+        if (write_enable)
+            set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+        else
+            set_domain.write_domain = 0;
         do {
             ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
                         &set_domain);
         } while (ret == -1 && errno == EINTR);
         if (ret != 0) {
-            fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
-                     __FILE__, __LINE__,
-                     bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
-                     strerror (errno));
+            fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
+                     __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
         }
-        bo_gem->cpu_domain_set = 1;
+        bo_gem->swrast = 1;
     }
 
     return 0;
@@ -512,13 +514,24 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
 static int
 dri_gem_bo_unmap(dri_bo *bo)
 {
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_sw_finish sw_finish;
+    int ret;
 
     if (bo == NULL)
         return 0;
 
     assert(bo_gem->mapped);
 
+    if (bo_gem->swrast) {
+        sw_finish.handle = bo_gem->gem_handle;
+        do {
+            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
+                        &sw_finish);
+        } while (ret == -1 && errno == EINTR);
+        bo_gem->swrast = 0;
+    }
     return 0;
 }
@@ -583,7 +596,7 @@ dri_gem_bo_wait_rendering(dri_bo *bo)
     int ret;
 
     set_domain.handle = bo_gem->gem_handle;
-    set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
     set_domain.write_domain = 0;
     ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
     if (ret != 0) {
@@ -744,8 +757,8 @@ dri_gem_post_submit(dri_bo *batch_buf)
         dri_bo *bo = bufmgr_gem->exec_bos[i];
         dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
 
-        /* Need to call set_domain on next bo_map */
-        bo_gem->cpu_domain_set = 0;
+        /* Need to call swrast on next bo_map */
+        bo_gem->swrast = 0;
 
         /* Disconnect the buffer from the validate list */
         bo_gem->validate_index = -1;
@@ -811,6 +811,10 @@ struct drm_driver {
     /* Master routines */
     int (*master_create)(struct drm_device *dev, struct drm_master *master);
     void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
 
+    int (*proc_init)(struct drm_minor *minor);
+    void (*proc_cleanup)(struct drm_minor *minor);
+
     /**
      * Driver-specific constructor for drm_gem_objects, to set up
      * obj->driver_private.
@@ -1366,7 +1370,7 @@ extern void drm_put_master(struct drm_master *master);
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                        struct drm_driver *driver);
 extern int drm_put_dev(struct drm_device *dev);
-extern int drm_put_minor(struct drm_minor **minor);
+extern int drm_put_minor(struct drm_device *dev, struct drm_minor **p);
 extern unsigned int drm_debug; /* 1 to enable debug output */
 
 extern struct class *drm_class;
@@ -353,7 +353,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
 #endif
 
 
-#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
+#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
 #define DRM_KMAP_ATOMIC_PROT_PFN
 extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
                                   pgprot_t protection);
@@ -424,9 +424,10 @@ static void drm_cleanup(struct drm_device * dev)
     drm_memrange_takedown(&dev->offset_manager);
     drm_ht_remove(&dev->object_hash);
 
-    drm_put_minor(&dev->primary);
+    drm_put_minor(dev, &dev->primary);
     if (drm_core_check_feature(dev, DRIVER_MODESET))
-        drm_put_minor(&dev->control);
+        drm_put_minor(dev, &dev->control);
 
     if (drm_put_dev(dev))
         DRM_ERROR("Cannot unload module\n");
 }
@@ -309,6 +309,13 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
             DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
             goto err_mem;
         }
+        if (dev->driver->proc_init) {
+            ret = dev->driver->proc_init(new_minor);
+            if (ret) {
+                DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n");
+                goto err_mem;
+            }
+        }
     } else
         new_minor->dev_root = NULL;
 
@@ -325,8 +332,11 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
 
 
 err_g2:
-    if (new_minor->type == DRM_MINOR_LEGACY)
+    if (new_minor->type == DRM_MINOR_LEGACY) {
+        if (dev->driver->proc_cleanup)
+            dev->driver->proc_cleanup(new_minor);
         drm_proc_cleanup(new_minor, drm_proc_root);
+    }
 err_mem:
     kfree(new_minor);
 err_idr:
@@ -398,10 +408,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 
     return 0;
 err_g5:
-    drm_put_minor(&dev->primary);
+    drm_put_minor(dev, &dev->primary);
 err_g4:
     if (drm_core_check_feature(dev, DRIVER_MODESET))
-        drm_put_minor(&dev->control);
+        drm_put_minor(dev, &dev->control);
 err_g3:
     if (!drm_fb_loaded)
         pci_disable_device(pdev);
@@ -452,13 +462,16 @@ int drm_put_dev(struct drm_device * dev)
  * last minor released.
  *
  */
-int drm_put_minor(struct drm_minor **minor_p)
+int drm_put_minor(struct drm_device *dev, struct drm_minor **minor_p)
 {
     struct drm_minor *minor = *minor_p;
     DRM_DEBUG("release secondary minor %d\n", minor->index);
 
-    if (minor->type == DRM_MINOR_LEGACY)
+    if (minor->type == DRM_MINOR_LEGACY) {
+        if (dev->driver->proc_cleanup)
+            dev->driver->proc_cleanup(minor);
         drm_proc_cleanup(minor, drm_proc_root);
+    }
     drm_sysfs_device_remove(minor);
 
     idr_remove(&drm_minors_idr, minor->index);
@@ -600,6 +600,8 @@ static struct drm_driver driver = {
     .get_reg_ofs = drm_core_get_reg_ofs,
     .master_create = i915_master_create,
     .master_destroy = i915_master_destroy,
+    .proc_init = i915_gem_proc_init,
+    .proc_cleanup = i915_gem_proc_cleanup,
     .ioctls = i915_ioctls,
     .gem_init_object = i915_gem_init_object,
     .gem_free_object = i915_gem_free_object,
@@ -27,6 +27,7 @@
 
 #include "drmP.h"
 #include "drm.h"
+#include "drm_compat.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -36,6 +37,13 @@
 #define WATCH_LRU 0
 #define WATCH_RELOC 0
 #define WATCH_INACTIVE 0
+#define WATCH_PWRITE 0
+
+#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+static void
+i915_gem_dump_object(struct drm_gem_object *obj, int len,
+                     const char *where, uint32_t mark);
+#endif
 
 static int
 i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -47,6 +55,9 @@ i915_gem_set_domain(struct drm_gem_object *obj,
                     uint32_t read_domains,
                     uint32_t write_domain);
 
+static void
+i915_gem_clflush_object(struct drm_gem_object *obj);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                      unsigned long end)
 {
@@ -158,6 +169,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
     return 0;
 }
 
+#include "drm_compat.h"
+
 /**
  * Writes data to the object referenced by handle.
  *
@@ -169,41 +182,121 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 {
     struct drm_i915_gem_pwrite *args = data;
     struct drm_gem_object *obj;
-    ssize_t written;
+    struct drm_i915_gem_object *obj_priv;
+    ssize_t remain;
     loff_t offset;
-    int ret;
+    char __user *user_data;
+    char *vaddr;
+    int i, o, l;
+    int ret = 0;
+    unsigned long pfn;
+    unsigned long unwritten;
 
     obj = drm_gem_object_lookup(dev, file_priv, args->handle);
     if (obj == NULL)
         return -EINVAL;
 
+    /** Bounds check destination.
+     *
+     * XXX: This could use review for overflow issues...
+     */
+    if (args->offset > obj->size || args->size > obj->size ||
+        args->offset + args->size > obj->size)
+        return -EFAULT;
+
+    user_data = (char __user *) (uintptr_t) args->data_ptr;
+    remain = args->size;
+    if (!access_ok(VERIFY_READ, user_data, remain))
+        return -EFAULT;
+
 
     mutex_lock(&dev->struct_mutex);
-    ret = i915_gem_set_domain(obj, file_priv,
-                              I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+    ret = i915_gem_object_pin(obj, 0);
     if (ret) {
         drm_gem_object_unreference(obj);
         mutex_unlock(&dev->struct_mutex);
         return ret;
     }
-    offset = args->offset;
-
-    written = vfs_write(obj->filp,
-                        (char __user *)(uintptr_t) args->data_ptr,
-                        args->size, &offset);
+    ret = i915_gem_set_domain(obj, file_priv,
+                              I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+    if (ret)
+        goto fail;
+
+    obj_priv = obj->driver_private;
+    offset = obj_priv->gtt_offset + args->offset;
+    obj_priv->dirty = 1;
 
-    if (written != args->size) {
-        drm_gem_object_unreference(obj);
-        mutex_unlock(&dev->struct_mutex);
-        if (written < 0)
-            return written;
-        else
-            return -EINVAL;
+    while (remain > 0) {
+        /** Operation in this page
+         *
+         * i = page number
+         * o = offset within page
+         * l = bytes to copy
+         */
+        i = offset >> PAGE_SHIFT;
+        o = offset & (PAGE_SIZE-1);
+        l = remain;
+        if ((o + l) > PAGE_SIZE)
+            l = PAGE_SIZE - o;
+
+        pfn = (dev->agp->base >> PAGE_SHIFT) + i;
+
+#ifdef DRM_KMAP_ATOMIC_PROT_PFN
+        /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
+         */
+        vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
+                                     __pgprot(__PAGE_KERNEL));
+#if WATCH_PWRITE
+        DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
+                 i, o, l, pfn, vaddr);
+#endif
+        unwritten = __copy_from_user_inatomic_nocache(vaddr + o, user_data, l);
+        kunmap_atomic(vaddr, KM_USER0);
+
+        if (unwritten)
+#endif
+        {
+            vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+#if WATCH_PWRITE
+            DRM_INFO("pwrite slow i %d o %d l %d pfn %ld vaddr %p\n",
+                     i, o, l, pfn, vaddr);
+#endif
+            if (vaddr == NULL) {
+                ret = -EFAULT;
+                goto fail;
+            }
+            unwritten = __copy_from_user(vaddr + o, user_data, l);
+#if WATCH_PWRITE
+            DRM_INFO("unwritten %ld\n", unwritten);
+#endif
+            iounmap(vaddr);
+            if (unwritten) {
+                ret = -EFAULT;
+                goto fail;
+            }
+        }
     }
 
+        remain -= l;
+        user_data += l;
+        offset += l;
+    }
+#if WATCH_PWRITE && 1
+    i915_gem_clflush_object(obj);
+    i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
+    i915_gem_clflush_object(obj);
+#endif
+
+fail:
+    i915_gem_object_unpin (obj);
     drm_gem_object_unreference(obj);
     mutex_unlock(&dev->struct_mutex);
 
-    return 0;
+#if WATCH_PWRITE
+    if (ret)
+        DRM_INFO("pwrite failed %d\n", ret);
+#endif
+    return ret;
 }
 
 /**
@@ -232,6 +325,45 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
     return ret;
 }
 
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+    struct drm_i915_gem_sw_finish *args = data;
+    struct drm_gem_object *obj;
+    struct drm_i915_gem_object *obj_priv;
+    int ret = 0;
+
+    if (!(dev->driver->driver_features & DRIVER_GEM))
+        return -ENODEV;
+
+    mutex_lock(&dev->struct_mutex);
+    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+    if (obj == NULL) {
+        mutex_unlock(&dev->struct_mutex);
+        return -EINVAL;
+    }
+
+#if WATCH_BUF
+    DRM_INFO("%s: sw_finish %d (%p)\n",
+             __func__, args->handle, obj);
+#endif
+    obj_priv = obj->driver_private;
+
+    /** Pinned buffers may be scanout, so flush the cache
+     */
+    if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
+        i915_gem_clflush_object(obj);
+        drm_agp_chipset_flush(dev);
+    }
+    drm_gem_object_unreference(obj);
+    mutex_unlock(&dev->struct_mutex);
+    return ret;
+}
+
 /**
  * Maps the contents of an object, returning the address it is mapped
  * into.
@@ -285,8 +417,13 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 
 
     for (i = 0; i < page_count; i++)
-        if (obj_priv->page_list[i] != NULL)
+        if (obj_priv->page_list[i] != NULL) {
+            if (obj_priv->dirty)
+                set_page_dirty(obj_priv->page_list[i]);
+            mark_page_accessed(obj_priv->page_list[i]);
             page_cache_release(obj_priv->page_list[i]);
+        }
+    obj_priv->dirty = 0;
 
     drm_free(obj_priv->page_list,
              page_count * sizeof(struct page *),
@@ -321,7 +458,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
 
     list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
         obj = obj_priv->obj;
-        if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & ~I915_GEM_DOMAIN_CPU))
+        if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
             DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
                       obj,
                       obj_priv->pin_count, obj_priv->active, obj->write_domain, file, line);
@@ -496,7 +633,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
     return (int32_t)(seq1 - seq2) >= 0;
 }
 
-static uint32_t
+uint32_t
 i915_get_gem_seqno(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -524,7 +661,7 @@ i915_gem_retire_requests(struct drm_device *dev)
                                    list);
         retiring_seqno = request->seqno;
 
-        if (i915_seqno_passed(seqno, retiring_seqno)) {
+        if (i915_seqno_passed(seqno, retiring_seqno) || dev_priv->mm.wedged) {
             i915_gem_retire_request(dev, request);
 
             list_del(&request->list);
@@ -564,12 +701,17 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
     BUG_ON(seqno == 0);
 
     if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+        dev_priv->mm.waiting_gem_seqno = seqno;
         i915_user_irq_on(dev);
         ret = wait_event_interruptible(dev_priv->irq_queue,
                                        i915_seqno_passed(i915_get_gem_seqno(dev),
-                                                         seqno));
+                                                         seqno) || dev_priv->mm.wedged);
         i915_user_irq_off(dev);
+        dev_priv->mm.waiting_gem_seqno = 0;
     }
+    if (dev_priv->mm.wedged)
+        ret = -EIO;
+
     if (ret)
         DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
                   __func__, ret, seqno, i915_get_gem_seqno(dev));
@@ -602,7 +744,7 @@ i915_gem_flush(struct drm_device *dev,
     if (flush_domains & I915_GEM_DOMAIN_CPU)
         drm_agp_chipset_flush(dev);
 
-    if ((invalidate_domains|flush_domains) & ~I915_GEM_DOMAIN_CPU) {
+    if ((invalidate_domains|flush_domains) & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
         /*
          * read/write caches:
          *
@@ -670,7 +812,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
     /* If there are writes queued to the buffer, flush and
      * create a new seqno to wait for.
      */
-    if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU)) {
+    if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
         uint32_t write_domain = obj->write_domain;
 #if WATCH_BUF
         DRM_INFO("%s: flushing object %p from write domain %08x\n",
@@ -756,11 +898,13 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
     i915_gem_object_free_page_list(obj);
 
+    if (obj_priv->gtt_space) {
         atomic_dec(&dev->gtt_count);
         atomic_sub(obj->size, &dev->gtt_memory);
 
         drm_memrange_put_block(obj_priv->gtt_space);
         obj_priv->gtt_space = NULL;
+    }
 
     /* Remove ourselves from the LRU list if present. */
     if (!list_empty(&obj_priv->list))
@@ -769,7 +913,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
     return 0;
 }
 
-#if WATCH_BUF | WATCH_EXEC
+#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
 static void
 i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
                    uint32_t bias, uint32_t mark)
@@ -886,6 +1030,8 @@ i915_gem_evict_something(struct drm_device *dev)
                                    list);
 
         ret = i915_wait_request(dev, request->seqno);
+        if (ret)
+            break;
 
         /* if waiting caused an object to become inactive,
          * then loop around and wait for it. Otherwise, we
@@ -934,6 +1080,11 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
     struct drm_i915_gem_object *obj_priv = obj->driver_private;
     int page_count, i;
+    struct address_space *mapping;
+    struct inode *inode;
+    struct page *page;
+    int ret;
 
     if (obj_priv->page_list)
         return 0;
 
@@ -949,16 +1100,25 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
         return -ENOMEM;
     }
 
+    inode = obj->filp->f_path.dentry->d_inode;
+    mapping = inode->i_mapping;
     for (i = 0; i < page_count; i++) {
-        obj_priv->page_list[i] =
-            find_or_create_page(obj->filp->f_mapping, i, GFP_HIGHUSER);
-        if (obj_priv->page_list[i] == NULL) {
-            DRM_ERROR("Failed to find_or_create_page()\n");
-            i915_gem_object_free_page_list(obj);
-            return -ENOMEM;
+        page = find_get_page(mapping, i);
+        if (page == NULL || !PageUptodate(page)) {
+            if (page) {
+                page_cache_release(page);
+                page = NULL;
+            }
-        unlock_page(obj_priv->page_list[i]);
+            ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
+
+            if (ret) {
+                DRM_ERROR("shmem_getpage failed: %d\n", ret);
+                i915_gem_object_free_page_list(obj);
+                return ret;
+            }
+            unlock_page(page);
+        }
+        obj_priv->page_list[i] = page;
     }
     return 0;
 }
@@ -1049,8 +1209,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
      * wasn't in the GTT, there shouldn't be any way it could have been in
      * a GPU cache
      */
-    BUG_ON(obj->read_domains & ~I915_GEM_DOMAIN_CPU);
-    BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
+    BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+    BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
 
     return 0;
 }
@@ -1187,13 +1347,16 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
                            uint32_t write_domain)
 {
     struct drm_device *dev = obj->dev;
+    struct drm_i915_gem_object *obj_priv = obj->driver_private;
     uint32_t invalidate_domains = 0;
     uint32_t flush_domains = 0;
     int ret;
 
#if WATCH_BUF
-    DRM_INFO("%s: object %p read %08x write %08x\n",
-             __func__, obj, read_domains, write_domain);
+    DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+             __func__, obj,
+             obj->read_domains, read_domains,
+             obj->write_domain, write_domain);
#endif
     /*
      * If the object isn't moving to a new write domain,
@@ -1201,6 +1364,8 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
      */
     if (write_domain == 0)
         read_domains |= obj->read_domains;
+    else
+        obj_priv->dirty = 1;
 
     /*
      * Flush the current write domain if
@@ -1228,7 +1393,7 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
      * flushed before the cpu cache is invalidated
      */
     if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
-        (flush_domains & ~I915_GEM_DOMAIN_CPU)) {
+        (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))) {
         ret = i915_gem_object_wait_rendering(obj);
         if (ret)
             return ret;
@@ -1241,6 +1406,12 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
     obj->read_domains = read_domains;
     dev->invalidate_domains |= invalidate_domains;
     dev->flush_domains |= flush_domains;
+#if WATCH_BUF
+    DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+             __func__,
+             obj->read_domains, obj->write_domain,
+             dev->invalidate_domains, dev->flush_domains);
+#endif
     return 0;
 }
 
@@ -1664,6 +1835,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
     mutex_lock(&dev->struct_mutex);
 
     i915_verify_inactive(dev, __FILE__, __LINE__);
 
+    if (dev_priv->mm.wedged) {
+        DRM_ERROR("Execbuf while wedged\n");
+        mutex_unlock(&dev->struct_mutex);
+        return -EIO;
+    }
+
     if (dev_priv->mm.suspended) {
         DRM_ERROR("Execbuf while VT-switched.\n");
         mutex_unlock(&dev->struct_mutex);
@@ -1844,7 +2022,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
     if (obj_priv->pin_count == 1) {
         atomic_inc(&dev->pin_count);
         atomic_add(obj->size, &dev->pin_memory);
-        if (!obj_priv->active && (obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0 &&
+        if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0 &&
             !list_empty(&obj_priv->list))
             list_del_init(&obj_priv->list);
     }
@@ -1870,7 +2048,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
      * the inactive list
      */
     if (obj_priv->pin_count == 0) {
-        if (!obj_priv->active && (obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0)
+        if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0)
             list_move_tail(&obj_priv->list,
                            &dev_priv->mm.inactive_list);
         atomic_dec(&dev->pin_count);
@@ -1906,6 +2084,14 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
         return ret;
     }
 
+    /** XXX - flush the CPU caches for pinned objects
+     * as the X server doesn't manage domains yet
+     */
+    if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+        i915_gem_clflush_object(obj);
+        drm_agp_chipset_flush(dev);
+        obj->write_domain = 0;
+    }
     args->offset = obj_priv->gtt_offset;
     drm_gem_object_unreference(obj);
     mutex_unlock(&dev->struct_mutex);
@@ -1994,6 +2180,11 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
+    struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+    while (obj_priv->pin_count > 0)
+        i915_gem_object_unpin(obj);
+
     i915_gem_object_unbind(obj);
 
     drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2007,43 +2198,123 @@ i915_gem_set_domain(struct drm_gem_object *obj,
 {
     struct drm_device *dev = obj->dev;
     int ret;
+    uint32_t flush_domains;
 
     BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
     ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
     if (ret)
         return ret;
-    i915_gem_dev_set_domain(obj->dev);
+    flush_domains = i915_gem_dev_set_domain(obj->dev);
+
+    if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
+        (void) i915_add_request(dev, flush_domains);
+
     return 0;
 }
 
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
-    struct drm_i915_private *dev_priv = dev->dev_private;
-
-    mutex_lock(&dev->struct_mutex);
-
-    /* Assume that the chip has been idled at this point. Just pull them
-     * off the execution list and unref them. Since this is the last
-     * close, this is also the last ref and they'll go away.
-     */
-
-    while (!list_empty(&dev_priv->mm.active_list)) {
-        struct drm_i915_gem_object *obj_priv;
-
-        obj_priv = list_first_entry(&dev_priv->mm.active_list,
-                                    struct drm_i915_gem_object,
-                                    list);
-
-        list_del_init(&obj_priv->list);
-        obj_priv->active = 0;
-        obj_priv->obj->write_domain = 0;
-        drm_gem_object_unreference(obj_priv->obj);
-    }
-
-    mutex_unlock(&dev->struct_mutex);
-}
+/** Unbinds all objects that are on the given buffer list. */
+static int
+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+{
+    struct drm_gem_object *obj;
+    struct drm_i915_gem_object *obj_priv;
+    int ret;
+
+    while (!list_empty(head)) {
+        obj_priv = list_first_entry(head,
+                                    struct drm_i915_gem_object,
+                                    list);
+        obj = obj_priv->obj;
+
+        if (obj_priv->pin_count != 0) {
+            DRM_ERROR("Pinned object in unbind list\n");
+            mutex_unlock(&dev->struct_mutex);
+            return -EINVAL;
+        }
+
+        ret = i915_gem_object_unbind(obj);
+        if (ret != 0) {
+            DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
+                      ret);
+            mutex_unlock(&dev->struct_mutex);
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+static int
+i915_gem_idle(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    uint32_t seqno, cur_seqno, last_seqno;
+    int stuck;
+
+    if (dev_priv->mm.suspended)
+        return 0;
+
+    /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+     * We need to replace this with a semaphore, or something.
+     */
+    dev_priv->mm.suspended = 1;
+
+    i915_kernel_lost_context(dev);
+
+    /* Flush the GPU along with all non-CPU write domains
+     */
+    i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
+                   ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+    seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+
+    if (seqno == 0) {
+        mutex_unlock(&dev->struct_mutex);
+        return -ENOMEM;
+    }
+
+    dev_priv->mm.waiting_gem_seqno = seqno;
+    last_seqno = 0;
+    stuck = 0;
+    for (;;) {
+        cur_seqno = i915_get_gem_seqno(dev);
+        if (i915_seqno_passed(cur_seqno, seqno))
+            break;
+        if (last_seqno == cur_seqno) {
+            if (stuck++ > 100) {
+                DRM_ERROR("hardware wedged\n");
+                dev_priv->mm.wedged = 1;
+                DRM_WAKEUP(&dev_priv->irq_queue);
+                break;
+            }
+        }
+        msleep(10);
+        last_seqno = cur_seqno;
+    }
+    dev_priv->mm.waiting_gem_seqno = 0;
+
+    i915_gem_retire_requests(dev);
+
+    /* Active and flushing should now be empty as we've
+     * waited for a sequence higher than any pending execbuffer
+     */
+    BUG_ON(!list_empty(&dev_priv->mm.active_list));
+    BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+    /* Request should now be empty as we've also waited
+     * for the last request in the list
+     */
+    BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+    /* Move all buffers out of the GTT. */
+    i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+
+    BUG_ON(!list_empty(&dev_priv->mm.active_list));
+    BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+    BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+    BUG_ON(!list_empty(&dev_priv->mm.request_list));
+    return 0;
+}
 
 int
@@ -2128,6 +2399,11 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
     struct drm_i915_private *dev_priv = dev->dev_private;
     int ret;
 
+    if (dev_priv->mm.wedged) {
+        DRM_ERROR("Renabling wedged hardware, good luck\n");
+        dev_priv->mm.wedged = 0;
+    }
+
     ret = i915_gem_init_ringbuffer(dev);
     if (ret != 0)
         return ret;
@@ -2142,91 +2418,293 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
     return 0;
 }
 
-/** Unbinds all objects that are on the given buffer list. */
-static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
-{
-    struct drm_gem_object *obj;
-    struct drm_i915_gem_object *obj_priv;
-    int ret;
-
-    while (!list_empty(head)) {
-        obj_priv = list_first_entry(head,
-                                    struct drm_i915_gem_object,
-                                    list);
-        obj = obj_priv->obj;
-
-        if (obj_priv->pin_count != 0) {
-            DRM_ERROR("Pinned object in unbind list\n");
-            mutex_unlock(&dev->struct_mutex);
-            return -EINVAL;
-        }
-
-        ret = i915_gem_object_unbind(obj);
-        if (ret != 0) {
-            DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
-                      ret);
-            mutex_unlock(&dev->struct_mutex);
-            return ret;
-        }
-    }
-
-    return 0;
-}
-
 int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
-    uint32_t seqno;
     int ret;
 
     mutex_lock(&dev->struct_mutex);
-    /* Hack!  Don't let anybody do execbuf while we don't control the chip.
-     * We need to replace this with a semaphore, or something.
-     */
-    dev_priv->mm.suspended = 1;
-
-    i915_kernel_lost_context(dev);
-
-    /* Flush the GPU along with all non-CPU write domains
-     */
-    i915_gem_flush(dev, ~I915_GEM_DOMAIN_CPU, ~I915_GEM_DOMAIN_CPU);
-    seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
-    if (seqno == 0) {
-        mutex_unlock(&dev->struct_mutex);
-        return -ENOMEM;
-    }
-    ret = i915_wait_request(dev, seqno);
-    if (ret) {
-        mutex_unlock(&dev->struct_mutex);
-        return ret;
-    }
-
-    /* Active and flushing should now be empty as we've
-     * waited for a sequence higher than any pending execbuffer
-     */
-    BUG_ON(!list_empty(&dev_priv->mm.active_list));
-    BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
-    /* Request should now be empty as we've also waited
-     * for the last request in the list
-     */
-    BUG_ON(!list_empty(&dev_priv->mm.request_list));
-
-    /* Move all buffers out of the GTT. */
-    i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
-
-    BUG_ON(!list_empty(&dev_priv->mm.active_list));
-    BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-    BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-    BUG_ON(!list_empty(&dev_priv->mm.request_list));
-
+    ret = i915_gem_idle(dev);
+    if (ret == 0)
         i915_gem_cleanup_ringbuffer(dev);
 
     mutex_unlock(&dev->struct_mutex);
 
     return 0;
 }
 
+static int i915_gem_active_info(char *buf, char **start, off_t offset,
+                                int request, int *eof, void *data)
+{
+    struct drm_minor *minor = (struct drm_minor *) data;
+    struct drm_device *dev = minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_gem_object *obj_priv;
+    int len = 0;
+
+    if (offset > DRM_PROC_LIMIT) {
+        *eof = 1;
+        return 0;
+    }
+
+    *start = &buf[offset];
+    *eof = 0;
+    DRM_PROC_PRINT("Active:\n");
+    list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+                        list)
+    {
+        struct drm_gem_object *obj = obj_priv->obj;
+        if (obj->name) {
+            DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+                           obj, obj->name,
+                           obj->read_domains, obj->write_domain,
+                           obj_priv->last_rendering_seqno);
+        } else {
+            DRM_PROC_PRINT(" %p: %08x %08x %d\n",
+                           obj,
+                           obj->read_domains, obj->write_domain,
+                           obj_priv->last_rendering_seqno);
+        }
+    }
+    if (len > request + offset)
+        return request;
+    *eof = 1;
+    return len - offset;
+}
+
+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+                                  int request, int *eof, void *data)
+{
+    struct drm_minor *minor = (struct drm_minor *) data;
+    struct drm_device *dev = minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_gem_object *obj_priv;
+    int len = 0;
+
+    if (offset > DRM_PROC_LIMIT) {
+        *eof = 1;
+        return 0;
+    }
+
+    *start = &buf[offset];
+    *eof = 0;
+    DRM_PROC_PRINT("Flushing:\n");
+    list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+                        list)
+    {
+        struct drm_gem_object *obj = obj_priv->obj;
+        if (obj->name) {
+            DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+                           obj, obj->name,
+                           obj->read_domains, obj->write_domain,
+                           obj_priv->last_rendering_seqno);
+        } else {
+            DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+                           obj->read_domains, obj->write_domain,
+                           obj_priv->last_rendering_seqno);
+        }
+    }
+    if (len > request + offset)
+        return request;
+    *eof = 1;
+    return len - offset;
+}
+
+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+                                  int request, int *eof, void *data)
+{
+    struct drm_minor *minor = (struct drm_minor *) data;
+    struct drm_device *dev = minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_gem_object *obj_priv;
+    int len = 0;
+
+    if (offset > DRM_PROC_LIMIT) {
+        *eof = 1;
+        return 0;
+    }
+
+    *start = &buf[offset];
+    *eof = 0;
+    DRM_PROC_PRINT("Inactive:\n");
+    list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
+                        list)
+    {
+        struct drm_gem_object *obj = obj_priv->obj;
+        if (obj->name) {
+            DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+                           obj, obj->name,
+                           obj->read_domains, obj->write_domain,
+                           obj_priv->last_rendering_seqno);
+        } else {
+            DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+                           obj->read_domains, obj->write_domain,
+                           obj_priv->last_rendering_seqno);
+        }
+    }
+    if (len > request + offset)
+        return request;
+    *eof = 1;
+    return len - offset;
+}
+
+static int i915_gem_request_info(char *buf, char **start, off_t offset,
+                                 int request, int *eof, void *data)
+{
+    struct drm_minor *minor = (struct drm_minor *) data;
+    struct drm_device *dev = minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_gem_request *gem_request;
+    int len = 0;
+
+    if (offset > DRM_PROC_LIMIT) {
+        *eof = 1;
+        return 0;
+    }
+
+    *start = &buf[offset];
+    *eof = 0;
+    DRM_PROC_PRINT("Request:\n");
+    list_for_each_entry(gem_request, &dev_priv->mm.request_list,
+                        list)
+    {
+        DRM_PROC_PRINT(" %d @ %d %08x\n",
+                       gem_request->seqno,
+                       (int) (jiffies - gem_request->emitted_jiffies),
+                       gem_request->flush_domains);
+    }
+    if (len > request + offset)
+        return request;
+    *eof = 1;
+    return len - offset;
+}
+
+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+                               int request, int *eof, void *data)
+{
+    struct drm_minor *minor = (struct drm_minor *) data;
+    struct drm_device *dev = minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    int len = 0;
+
+    if (offset > DRM_PROC_LIMIT) {
+        *eof = 1;
+        return 0;
+    }
+
+    *start = &buf[offset];
+    *eof = 0;
+    DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+    DRM_PROC_PRINT("Waiter sequence: %d\n", dev_priv->mm.waiting_gem_seqno);
+    DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+    if (len > request + offset)
+        return request;
+    *eof = 1;
+    return len - offset;
+}
+
+
+static int i915_interrupt_info(char *buf, char **start, off_t offset,
+                               int request, int *eof, void *data)
+{
+    struct drm_minor *minor = (struct drm_minor *) data;
+    struct drm_device *dev = minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    int len = 0;
+
+    if (offset > DRM_PROC_LIMIT) {
+        *eof = 1;
+        return 0;
+    }
+
+    *start = &buf[offset];
+    *eof = 0;
+    DRM_PROC_PRINT("Interrupt enable: %08x\n",
+                   I915_READ(IER));
+    DRM_PROC_PRINT("Interrupt identity: %08x\n",
+                   I915_READ(IIR));
+    DRM_PROC_PRINT("Interrupt mask: %08x\n",
+                   I915_READ(IMR));
+    DRM_PROC_PRINT("Pipe A stat: %08x\n",
+                   I915_READ(PIPEASTAT));
+    DRM_PROC_PRINT("Pipe B stat: %08x\n",
+                   I915_READ(PIPEBSTAT));
+    DRM_PROC_PRINT("Interrupts received: %d\n",
+                   atomic_read(&dev_priv->irq_received));
+    DRM_PROC_PRINT("Current sequence: %d\n",
+                   i915_get_gem_seqno(dev));
+    DRM_PROC_PRINT("Waiter sequence: %d\n",
+                   dev_priv->mm.waiting_gem_seqno);
+    DRM_PROC_PRINT("IRQ sequence: %d\n",
+                   dev_priv->mm.irq_gem_seqno);
+    if (len > request + offset)
+        return request;
+    *eof = 1;
+    return len - offset;
+}
+
+static struct drm_proc_list {
+    const char *name; /**< file name */
+    int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
+} i915_gem_proc_list[] = {
+    {"i915_gem_active", i915_gem_active_info},
+    {"i915_gem_flushing", i915_gem_flushing_info},
+    {"i915_gem_inactive", i915_gem_inactive_info},
+    {"i915_gem_request", i915_gem_request_info},
+    {"i915_gem_seqno", i915_gem_seqno_info},
+    {"i915_gem_interrupt", i915_interrupt_info},
+};
+
+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+
+int i915_gem_proc_init(struct drm_minor *minor)
+{
+    struct proc_dir_entry *ent;
+    int i, j;
+
+    for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+        ent = create_proc_entry(i915_gem_proc_list[i].name,
+                                S_IFREG | S_IRUGO, minor->dev_root);
+        if (!ent) {
+            DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+                      i915_gem_proc_list[i].name);
+            for (j = 0; j < i; j++)
+                remove_proc_entry(i915_gem_proc_list[i].name,
+                                  minor->dev_root);
+            return -1;
+        }
+        ent->read_proc = i915_gem_proc_list[i].f;
+        ent->data = minor;
+    }
+    return 0;
+}
+
+void i915_gem_proc_cleanup(struct drm_minor *minor)
+{
+    int i;
+
+    if (!minor->dev_root)
+        return;
+
+    for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+        remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+    int ret;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+
+    mutex_lock(&dev->struct_mutex);
+
+    if (dev_priv->ring.ring_obj != NULL) {
+        ret = i915_gem_idle(dev);
+        if (ret)
+            DRM_ERROR("failed to idle hardware: %d\n", ret);
+
+        i915_gem_cleanup_ringbuffer(dev);
+    }
+
+    mutex_unlock(&dev->struct_mutex);
+}
@@ -1093,6 +1093,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
     DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
     DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
     DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
+    DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
@@ -189,6 +189,7 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_GEM_PWRITE	0x1d
 #define DRM_I915_GEM_MMAP	0x1e
 #define DRM_I915_GEM_SET_DOMAIN	0x1f
+#define DRM_I915_GEM_SW_FINISH	0x20
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -221,6 +222,7 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
 #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 
 /* Asynchronous page flipping:
  */
@@ -505,6 +507,11 @@ struct drm_i915_gem_set_domain {
     uint32_t write_domain;
 };
 
+struct drm_i915_gem_sw_finish {
+    /** Handle for the object */
+    uint32_t handle;
+};
+
 struct drm_i915_gem_relocation_entry {
     /**
      * Handle of the buffer being pointed to by this relocation entry.
@@ -569,6 +576,8 @@ struct drm_i915_gem_relocation_entry {
 #define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
 /** Vertex address cache */
 #define I915_GEM_DOMAIN_VERTEX		0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT		0x00000040
 /** @} */
 
 struct drm_i915_gem_exec_object {
@ -134,7 +134,6 @@ struct drm_i915_private {
|
||||||
|
|
||||||
wait_queue_head_t irq_queue;
|
wait_queue_head_t irq_queue;
|
||||||
atomic_t irq_received;
|
atomic_t irq_received;
|
||||||
atomic_t irq_emitted;
|
|
||||||
|
|
||||||
int tex_lru_log_granularity;
|
int tex_lru_log_granularity;
|
||||||
int allow_batchbuffer;
|
int allow_batchbuffer;
|
||||||
|
@@ -235,6 +234,16 @@ struct drm_i915_private {

 		uint32_t next_gem_seqno;

+		/**
+		 * Waiting sequence number, if any
+		 */
+		uint32_t waiting_gem_seqno;
+
+		/**
+		 * Last seq seen at irq time
+		 */
+		uint32_t irq_gem_seqno;
+
 		/**
 		 * Flag if the X Server, and thus DRM, is not currently in
 		 * control of the device.
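
A simplified sketch of how a waiting_gem_seqno / irq_gem_seqno pair is typically consumed: the waiter records the sequence number it needs, sleeps on irq_queue, and relies on the interrupt handler (later in this patch) to publish the ring's current sequence number. The helper name, the wrap-safe comparison and the use of wait_event_interruptible are assumptions for illustration, not the driver's actual wait path.

#include <linux/wait.h>

/* Hypothetical wait helper built on the new bookkeeping fields. */
static int example_wait_seqno(struct drm_device *dev, uint32_t seqno)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        dev_priv->mm.waiting_gem_seqno = seqno;  /* advertise what we wait for */
        i915_user_irq_on(dev);
        ret = wait_event_interruptible(dev_priv->irq_queue,
                        (int32_t)(dev_priv->mm.irq_gem_seqno - seqno) >= 0);
        i915_user_irq_off(dev);
        dev_priv->mm.waiting_gem_seqno = 0;
        return ret;
}
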
@@ -244,6 +253,15 @@ struct drm_i915_private {
 		 * transitioned away from for kernel modesetting.
 		 */
 		int suspended;
+
+		/**
+		 * Flag if the hardware appears to be wedged.
+		 *
+		 * This is set when attempts to idle the device timeout.
+		 * It prevents command submission from occuring and makes
+		 * every pending request fail
+		 */
+		int wedged;
 	} mm;

 	struct work_struct user_interrupt_task;
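
The wedged flag is meant as a submission gate; one plausible way a submission path could honor it is sketched below. The -EIO choice and the helper name are assumptions rather than part of this patch.

#include <linux/errno.h>

/* Hypothetical guard at the top of a command-submission path. */
static int example_check_wedged(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->mm.wedged)
                return -EIO;    /* refuse new work while the GPU appears hung */
        return 0;
}
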
@@ -369,6 +387,12 @@ struct drm_i915_gem_object {
 	 */
 	int active;

+	/**
+	 * This is set if the object has been written to since last bound
+	 * to the GTT
+	 */
+	int dirty;
+
 	/** AGP memory structure for our GTT binding. */
 	DRM_AGP_MEM *agp_mem;

@@ -521,6 +545,8 @@ int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
@@ -535,11 +561,14 @@ int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv);
 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv);
+int i915_gem_proc_init(struct drm_minor *minor);
+void i915_gem_proc_cleanup(struct drm_minor *minor);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
 void i915_gem_object_unpin(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
 void i915_gem_retire_requests(struct drm_device *dev);
 int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@ -42,6 +42,26 @@
 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

+static inline void
+i915_enable_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != 0) {
+		dev_priv->irq_mask_reg &= ~mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
+static inline void
+i915_disable_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != mask) {
+		dev_priv->irq_mask_reg |= mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
 /**
  * i915_get_pipe - return the the pipe associated with a given plane
  * @dev: DRM device
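
The two helpers centralize the same three-step sequence: update the cached irq_mask_reg, write IMR, then read IMR back to flush the posted write. Callers are expected to hold user_irq_lock around the cached-mask update, as the later hunks in this patch do. A hedged caller-side sketch follows; the function name and the "pulse" usage are illustrative.

/* Illustrative caller: enable an interrupt source, wait, disable it again.
 * The cached irq_mask_reg is shared state, hence the user_irq_lock. */
static void example_pulse_irq(struct drm_device *dev, uint32_t mask)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        DRM_SPINLOCK(&dev_priv->user_irq_lock);
        i915_enable_irq(dev_priv, mask);        /* clears bits in IMR */
        DRM_SPINUNLOCK(&dev_priv->user_irq_lock);

        /* ... sleep until the interrupt of interest has fired ... */

        DRM_SPINLOCK(&dev_priv->user_irq_lock);
        i915_disable_irq(dev_priv, mask);       /* sets the bits in IMR again */
        DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
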
@@ -510,17 +530,27 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	int hotplug = 0;
 	int vblank = 0;

-	/* On i8xx/i915 hw the IIR and IER are 16bit on i9xx its 32bit */
-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-		iir = I915_READ(IIR);
-	else
-		iir = I915_READ16(IIR);
-
 	if (dev->pdev->msi_enabled)
-		I915_WRITE(IER, 0);
-	if (!iir)
+		I915_WRITE(IMR, ~0);
+	iir = I915_READ(IIR);
+#if 0
+	DRM_DEBUG("flag=%08x\n", iir);
+#endif
+	atomic_inc(&dev_priv->irq_received);
+	if (iir == 0) {
+		DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
+			   iir,
+			   I915_READ(IMR),
+			   I915_READ(IER),
+			   I915_READ(PIPEASTAT),
+			   I915_READ(PIPEBSTAT));
+		if (dev->pdev->msi_enabled) {
+			I915_WRITE(IMR,
+				   dev_priv->irq_mask_reg);
+			(void) I915_READ(IMR);
+		}
 		return IRQ_NONE;
+	}

 	/*
 	 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
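
With MSI a new message can be delivered while the handler is still running, so the handler now masks everything through IMR on entry and restores the cached mask on every exit path, instead of zeroing IER. Below is a compressed sketch of that bracketing, with the real acknowledgment work elided and an illustrative signature in place of DRM_IRQ_ARGS.

/* Sketch of the MSI bracketing: mask on entry, restore the cached mask
 * on every return path, and flush posted writes with a readback. */
static irqreturn_t example_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t iir;

        if (dev->pdev->msi_enabled)
                I915_WRITE(IMR, ~0);            /* mask everything while we run */

        iir = I915_READ(IIR);
        if (iir == 0) {
                if (dev->pdev->msi_enabled) {
                        I915_WRITE(IMR, dev_priv->irq_mask_reg);
                        (void) I915_READ(IMR);  /* flush the posted write */
                }
                return IRQ_NONE;                /* not our interrupt */
        }

        /* ... read and clear PIPE(A|B)STAT, handle the sources ... */

        I915_WRITE(IIR, iir);
        if (dev->pdev->msi_enabled)
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
        (void) I915_READ(IIR);
        return IRQ_HANDLED;
}
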
@@ -528,13 +558,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	 */
 	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
 		pipea_stats = I915_READ(PIPEASTAT);
-		if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
-				   PIPE_VBLANK_INTERRUPT_STATUS))
-		{
-			vblank++;
-			drm_handle_vblank(dev, i915_get_plane(dev, 0));
-		}
+		I915_WRITE(PIPEASTAT, pipea_stats);
 	}

+	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
+		pipeb_stats = I915_READ(PIPEBSTAT);
+		I915_WRITE(PIPEBSTAT, pipeb_stats);
+	}
+
+	I915_WRITE(IIR, iir);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	(void) I915_READ(IIR); /* Flush posted writes */
+
 	/* This is a global event, and not a pipe A event */
 	if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
 		hotplug = 1;
@@ -547,51 +583,31 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			I915_WRITE(TV_DAC, tvdac | TVDAC_STATE_CHG_EN);
 		}

-		I915_WRITE(PIPEASTAT, pipea_stats);
-	}
-
-	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
-		pipeb_stats = I915_READ(PIPEBSTAT);
-		if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
-				   PIPE_VBLANK_INTERRUPT_STATUS))
-		{
-			vblank++;
-			drm_handle_vblank(dev, i915_get_plane(dev, 1));
-		}
-		I915_WRITE(PIPEBSTAT, pipeb_stats);
-	}
-
-	/* Clear the generated interrupt */
-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
-		I915_WRITE(IIR, iir);
-		(void) I915_READ(IIR);
-	} else {
-		I915_WRITE16(IIR, iir);
-		(void) I915_READ16(IIR);
-	}
-
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 	}

 	if (iir & I915_USER_INTERRUPT) {
+		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
 		DRM_WAKEUP(&dev_priv->irq_queue);
 #ifdef I915_HAVE_FENCE
 		i915_fence_handler(dev);
 #endif
 	}

-	if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
-			   I915_VBLANK_INTERRUPT_STATUS)) {
-		vblank = 1;
+	if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+			   PIPE_VBLANK_INTERRUPT_STATUS)) {
+		vblank++;
 		drm_handle_vblank(dev, i915_get_plane(dev, 0));
 	}
-	if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
-			   I915_VBLANK_INTERRUPT_STATUS)) {
-		vblank = 1;
+
+	if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+			   PIPE_VBLANK_INTERRUPT_STATUS)) {
+		vblank++;
 		drm_handle_vblank(dev, i915_get_plane(dev, 1));
 	}

 	if (vblank) {
 		if (dev_priv->swaps_pending > 0)
 			drm_locked_tasklet(dev, i915_vblank_tasklet);
@@ -616,9 +632,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		i915_run_hotplug_tasklet(dev, temp2);
 	}

-	if (dev->pdev->msi_enabled)
-		I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
-
 	return IRQ_HANDLED;
 }

@@ -646,16 +659,9 @@ void i915_user_irq_on(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;

 	DRM_SPINLOCK(&dev_priv->user_irq_lock);
-	if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
-		dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
-		if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-			I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		else
-			I915_WRITE16(IMR, dev_priv->irq_mask_reg);
-		I915_READ16(IMR);
-	}
+	if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);

 }

 void i915_user_irq_off(struct drm_device *dev)
@@ -664,14 +670,8 @@ void i915_user_irq_off(struct drm_device *dev)

 	DRM_SPINLOCK(&dev_priv->user_irq_lock);
 	BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
-	if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-		dev_priv->irq_mask_reg |= I915_USER_INTERRUPT;
-		if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-			I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		else
-			I915_WRITE16(IMR, dev_priv->irq_mask_reg);
-		I915_READ16(IMR);
-	}
+	if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
 }

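
i915_user_irq_on() and i915_user_irq_off() keep a refcount so nested users share a single unmask of I915_USER_INTERRUPT; with the new helpers they reduce to the refcount bookkeeping plus one call each. A brief sketch of the expected pairing from a caller's perspective, with the surrounding wait logic elided and the function name purely illustrative:

/* Illustrative pairing: every _on must be balanced by an _off. */
static void example_user_irq_section(struct drm_device *dev)
{
        i915_user_irq_on(dev);          /* first user unmasks I915_USER_INTERRUPT */

        /* ... sleep on dev_priv->irq_queue until the wanted condition holds ... */

        i915_user_irq_off(dev);         /* last user masks it again */
}
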
@@ -803,11 +803,7 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
 	}

 	DRM_SPINLOCK(&dev_priv->user_irq_lock);
-	dev_priv->irq_mask_reg &= ~mask_reg;
-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-	else
-		I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+	i915_enable_irq(dev_priv, mask_reg);
 	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);

 	return 0;
@@ -837,11 +833,7 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
 	}

 	DRM_SPINLOCK(&dev_priv->user_irq_lock);
-	dev_priv->irq_mask_reg |= mask_reg;
-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-	else
-		I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+	i915_disable_irq(dev_priv, mask_reg);
 	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);

 	if (pipestat_reg) {
@@ -863,7 +855,7 @@ void i915_enable_interrupt (struct drm_device *dev)
 	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 	struct drm_connector *o;

-	dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
+	dev_priv->irq_mask_reg &= ~0;

 	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
 		if (dev->mode_config.num_connector)
@@ -925,14 +917,9 @@ void i915_enable_interrupt (struct drm_device *dev)
 		}
 	}

-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		I915_WRITE(IER, ~dev_priv->irq_mask_reg);
-	} else {
-		I915_WRITE16(IMR, dev_priv->irq_mask_reg);
-		I915_WRITE16(IER, ~(u16)dev_priv->irq_mask_reg);
-	}
+	I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+	(void) I915_READ (IER);

 	dev_priv->irq_enabled = 1;
 }

@@ -964,17 +951,15 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_vblank_pipe *pipe = data;
-	u16 flag;
+	u32 flag = 0;

 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}

-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
-		flag = I915_READ(IER);
-	else
-		flag = I915_READ16(IER);
+	if (dev_priv->irq_enabled)
+		flag = ~dev_priv->irq_mask_reg;

 	pipe->pipe = 0;
 	if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
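
i915_vblank_pipe_get() now derives its answer from the software mask rather than reading IER, which sidesteps the 16-bit IER on older parts and reports exactly what the driver believes it has enabled. A hedged sketch of that derivation; the DRM_I915_VBLANK_PIPE_* flags are assumed to be the values the ioctl reports, and the helper name is illustrative.

/* Illustrative: compute the vblank-capable pipe flags from the cached mask. */
static uint32_t example_vblank_pipes(struct drm_i915_private *dev_priv)
{
        uint32_t enabled = dev_priv->irq_enabled ? ~dev_priv->irq_mask_reg : 0;
        uint32_t pipes = 0;

        if (enabled & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
                pipes |= DRM_I915_VBLANK_PIPE_A;
        if (enabled & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
                pipes |= DRM_I915_VBLANK_PIPE_B;
        return pipes;
}
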
@@ -1158,19 +1143,12 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 	tmp = I915_READ(PIPEBSTAT);
 	I915_WRITE(PIPEBSTAT, tmp);

-	I915_WRITE16(HWSTAM, 0xeffe);
-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
-		I915_WRITE(IMR, 0x0);
-		I915_WRITE(IER, 0x0);
-		tmp = I915_READ(IIR);
-		I915_WRITE(IIR, tmp);
-	} else {
-		I915_WRITE16(IMR, 0x0);
-		I915_WRITE16(IER, 0x0);
-		tmp = I915_READ16(IIR);
-		I915_WRITE16(IIR, tmp);
-	}
+	atomic_set(&dev_priv->irq_received, 0);
+	I915_WRITE(HWSTAM, 0xffff);
+	I915_WRITE(IER, 0x0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IIR, 0xffffffff);
+	(void) I915_READ(IIR);
 }

 int i915_driver_irq_postinstall(struct drm_device * dev)