Merge commit 'origin/drm-gem' into modesetting-gem
Use new GEM-based ring buffer initialization. Still need to init GEM & use it for framebuffer allocation etc. Conflicts: shared-core/i915_dma.c shared-core/i915_drv.h
commit
f5412a944f
|
@ -1453,6 +1453,9 @@ drm_gem_init (struct drm_device *dev);
|
|||
void
|
||||
drm_gem_object_free (struct kref *kref);
|
||||
|
||||
struct drm_gem_object *
|
||||
drm_gem_object_alloc(struct drm_device *dev, size_t size);
|
||||
|
||||
void
|
||||
drm_gem_object_handle_free (struct kref *kref);
|
||||
|
||||
|
|
|
@ -80,7 +80,7 @@ drm_gem_init(struct drm_device *dev)
|
|||
/**
|
||||
* Allocate a GEM object of the specified size with shmfs backing store
|
||||
*/
|
||||
static struct drm_gem_object *
|
||||
struct drm_gem_object *
|
||||
drm_gem_object_alloc(struct drm_device *dev, size_t size)
|
||||
{
|
||||
struct drm_gem_object *obj;
|
||||
|
@ -117,6 +117,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
|
|||
atomic_inc(&dev->object_count);
|
||||
return obj;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_object_alloc);
|
||||
|
||||
/**
|
||||
* Removes the mapping from handle to filp for this object.
|
||||
|
|
|
@ -702,14 +702,17 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
|
|||
BUG_ON(obj_priv->page_list != NULL);
|
||||
obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
|
||||
DRM_MEM_DRIVER);
|
||||
if (obj_priv->page_list == NULL)
|
||||
if (obj_priv->page_list == NULL) {
|
||||
DRM_ERROR("Faled to allocate page list\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < page_count; i++) {
|
||||
obj_priv->page_list[i] =
|
||||
find_or_create_page(obj->filp->f_mapping, i, GFP_HIGHUSER);
|
||||
|
||||
if (obj_priv->page_list[i] == NULL) {
|
||||
DRM_ERROR("Failed to find_or_create_page()\n");
|
||||
i915_gem_object_free_page_list(obj);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -758,14 +761,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
|
|||
DRM_INFO("%s: GTT full, evicting something\n", __func__);
|
||||
#endif
|
||||
if (list_empty(&dev_priv->mm.inactive_list) &&
|
||||
list_empty(&dev_priv->mm.flushing_list) &&
|
||||
list_empty(&dev_priv->mm.active_list)) {
|
||||
DRM_ERROR("GTT full, but LRU list empty\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = i915_gem_evict_something(dev);
|
||||
if (ret != 0)
|
||||
if (ret != 0) {
|
||||
DRM_ERROR("Failed to evict a buffer\n");
|
||||
return ret;
|
||||
}
|
||||
goto search_free;
|
||||
}
|
||||
|
||||
|
@ -1383,6 +1389,7 @@ int
|
|||
i915_gem_execbuffer(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_execbuffer *args = data;
|
||||
struct drm_i915_gem_exec_object *exec_list = NULL;
|
||||
struct drm_gem_object **object_list = NULL;
|
||||
|
@ -1423,6 +1430,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
|
|||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
if (dev_priv->mm.suspended) {
|
||||
DRM_ERROR("Execbuf while VT-switched.\n");
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* Zero the gloabl flush/invalidate flags. These
|
||||
* will be modified as each object is bound to the
|
||||
* gtt
|
||||
|
@ -1559,6 +1572,37 @@ pre_mutex_err:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
|
||||
{
|
||||
struct drm_device *dev = obj->dev;
|
||||
struct drm_i915_gem_object *obj_priv = obj->driver_private;
|
||||
int ret;
|
||||
|
||||
if (obj_priv->gtt_space == NULL) {
|
||||
ret = i915_gem_object_bind_to_gtt(obj, alignment);
|
||||
if (ret != 0) {
|
||||
DRM_ERROR("Failure to bind in "
|
||||
"i915_gem_pin_ioctl(): %d\n",
|
||||
ret);
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
obj_priv->pin_count++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
i915_gem_object_unpin(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_i915_gem_object *obj_priv = obj->driver_private;
|
||||
|
||||
obj_priv->pin_count--;
|
||||
}
|
||||
|
||||
int
|
||||
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
|
@ -1578,22 +1622,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
|
|||
mutex_unlock(&dev->struct_mutex);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
obj_priv = obj->driver_private;
|
||||
if (obj_priv->gtt_space == NULL) {
|
||||
ret = i915_gem_object_bind_to_gtt(obj,
|
||||
(unsigned) args->alignment);
|
||||
|
||||
ret = i915_gem_object_pin(obj, args->alignment);
|
||||
if (ret != 0) {
|
||||
DRM_ERROR("Failure to bind in "
|
||||
"i915_gem_pin_ioctl(): %d\n",
|
||||
ret);
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
obj_priv->pin_count++;
|
||||
args->offset = obj_priv->gtt_offset;
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
@ -1607,7 +1644,6 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
|
|||
{
|
||||
struct drm_i915_gem_pin *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
struct drm_i915_gem_object *obj_priv;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
|
@ -1620,8 +1656,8 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
obj_priv = obj->driver_private;
|
||||
obj_priv->pin_count--;
|
||||
i915_gem_object_unpin(obj);
|
||||
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return 0;
|
||||
|
@ -1757,3 +1793,173 @@ i915_gem_lastclose(struct drm_device *dev)
|
|||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
||||
int
|
||||
i915_gem_init_ringbuffer(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_gem_object *obj;
|
||||
struct drm_i915_gem_object *obj_priv;
|
||||
int ret;
|
||||
|
||||
obj = drm_gem_object_alloc(dev, 128 * 1024);
|
||||
if (obj == NULL) {
|
||||
DRM_ERROR("Failed to allocate ringbuffer\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
obj_priv = obj->driver_private;
|
||||
|
||||
ret = i915_gem_object_pin(obj, 4096);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
/* Set up the kernel mapping for the ring. */
|
||||
dev_priv->ring.Size = obj->size;
|
||||
dev_priv->ring.tail_mask = obj->size - 1;
|
||||
|
||||
dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
|
||||
dev_priv->ring.map.size = obj->size;
|
||||
dev_priv->ring.map.type = 0;
|
||||
dev_priv->ring.map.flags = 0;
|
||||
dev_priv->ring.map.mtrr = 0;
|
||||
|
||||
drm_core_ioremap(&dev_priv->ring.map, dev);
|
||||
if (dev_priv->ring.map.handle == NULL) {
|
||||
DRM_ERROR("Failed to map ringbuffer.\n");
|
||||
memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
|
||||
drm_gem_object_unreference(obj);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_priv->ring.ring_obj = obj;
|
||||
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
|
||||
|
||||
/* Stop the ring if it's running. */
|
||||
I915_WRITE(LP_RING + RING_LEN, 0);
|
||||
I915_WRITE(LP_RING + RING_HEAD, 0);
|
||||
I915_WRITE(LP_RING + RING_TAIL, 0);
|
||||
I915_WRITE(LP_RING + RING_START, 0);
|
||||
|
||||
/* Initialize the ring. */
|
||||
I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
|
||||
I915_WRITE(LP_RING + RING_LEN,
|
||||
((obj->size - 4096) & RING_NR_PAGES) |
|
||||
RING_NO_REPORT |
|
||||
RING_VALID);
|
||||
|
||||
/* Update our cache of the ring state */
|
||||
i915_kernel_lost_context(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (dev_priv->ring.ring_obj == NULL)
|
||||
return;
|
||||
|
||||
drm_core_ioremapfree(&dev_priv->ring.map, dev);
|
||||
|
||||
i915_gem_object_unpin(dev_priv->ring.ring_obj);
|
||||
drm_gem_object_unreference(dev_priv->ring.ring_obj);
|
||||
|
||||
memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
|
||||
}
|
||||
|
||||
int
|
||||
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_init_ringbuffer(dev);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
dev_priv->mm.suspended = 0;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Unbinds all objects that are on the given buffer list. */
|
||||
static int
|
||||
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
|
||||
{
|
||||
struct drm_gem_object *obj;
|
||||
struct drm_i915_gem_object *obj_priv;
|
||||
int ret;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
obj_priv = list_first_entry(head,
|
||||
struct drm_i915_gem_object,
|
||||
list);
|
||||
obj = obj_priv->obj;
|
||||
|
||||
if (obj_priv->pin_count != 0) {
|
||||
DRM_ERROR("Pinned object in unbind list\n");
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_unbind(obj);
|
||||
if (ret != 0) {
|
||||
DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
|
||||
ret);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
/* Hack! Don't let anybody do execbuf while we don't control the chip.
|
||||
* We need to replace this with a semaphore, or something.
|
||||
*/
|
||||
dev_priv->mm.suspended = 1;
|
||||
|
||||
/* Move all buffers out of the GTT. */
|
||||
i915_gem_evict_from_list(dev, &dev_priv->mm.active_list);
|
||||
i915_gem_evict_from_list(dev, &dev_priv->mm.flushing_list);
|
||||
i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
|
||||
|
||||
/* Make sure the harware's idle. */
|
||||
while (!list_empty(&dev_priv->mm.request_list)) {
|
||||
struct drm_i915_gem_request *request;
|
||||
int ret;
|
||||
|
||||
request = list_first_entry(&dev_priv->mm.request_list,
|
||||
struct drm_i915_gem_request,
|
||||
list);
|
||||
|
||||
ret = i915_wait_request(dev, request->seqno);
|
||||
if (ret != 0) {
|
||||
DRM_ERROR("Error waiting for idle at LeaveVT: %d\n",
|
||||
ret);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
BUG_ON(!list_empty(&dev_priv->mm.active_list));
|
||||
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
|
||||
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
|
||||
|
||||
i915_gem_cleanup_ringbuffer(dev);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -232,18 +232,14 @@ static int i915_initialize(struct drm_device * dev,
|
|||
}
|
||||
#endif
|
||||
|
||||
if (!dev_priv->ring.Size) {
|
||||
dev_priv->ring.Start = init->ring_start;
|
||||
dev_priv->ring.End = init->ring_end;
|
||||
if (init->ring_size != 0) {
|
||||
dev_priv->ring.Size = init->ring_size;
|
||||
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
|
||||
|
||||
dev_priv->ring.map.offset = init->ring_start;
|
||||
dev_priv->ring.map.size = init->ring_size;
|
||||
dev_priv->ring.map.type = 0;
|
||||
dev_priv->ring.map.flags = 0;
|
||||
dev_priv->ring.map.mtrr = 0;
|
||||
|
||||
drm_core_ioremap(&dev_priv->ring.map, dev);
|
||||
|
||||
if (dev_priv->ring.map.handle == NULL) {
|
||||
|
@ -255,7 +251,6 @@ static int i915_initialize(struct drm_device * dev,
|
|||
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
|
||||
}
|
||||
|
||||
|
||||
dev_priv->cpp = init->cpp;
|
||||
master_priv->sarea_priv->pf_current_page = 0;
|
||||
|
||||
|
@ -1091,6 +1086,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
|
|||
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
|
||||
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
|
||||
};
|
||||
|
||||
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
|
||||
|
|
|
@ -182,6 +182,8 @@ typedef struct drm_i915_sarea {
|
|||
#define DRM_I915_GEM_UNPIN 0x16
|
||||
#define DRM_I915_GEM_BUSY 0x17
|
||||
#define DRM_I915_GEM_THROTTLE 0x18
|
||||
#define DRM_I915_GEM_ENTERVT 0x19
|
||||
#define DRM_I915_GEM_LEAVEVT 0x20
|
||||
|
||||
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
|
||||
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
|
||||
|
@ -207,6 +209,8 @@ typedef struct drm_i915_sarea {
|
|||
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
|
||||
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
|
||||
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
|
||||
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
|
||||
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
|
||||
|
||||
/* Asynchronous page flipping:
|
||||
*/
|
||||
|
|
|
@ -81,14 +81,13 @@ struct drm_i915_validate_buffer;
|
|||
|
||||
struct drm_i915_ring_buffer {
|
||||
int tail_mask;
|
||||
unsigned long Start;
|
||||
unsigned long End;
|
||||
unsigned long Size;
|
||||
u8 *virtual_start;
|
||||
int head;
|
||||
int tail;
|
||||
int space;
|
||||
drm_local_map_t map;
|
||||
struct drm_gem_object *ring_obj;
|
||||
};
|
||||
|
||||
struct mem_block {
|
||||
|
@ -236,6 +235,16 @@ struct drm_i915_private {
|
|||
struct work_struct retire_task;
|
||||
|
||||
uint32_t next_gem_seqno;
|
||||
|
||||
/**
|
||||
* Flag if the X Server, and thus DRM, is not currently in
|
||||
* control of the device.
|
||||
*
|
||||
* This is set between LeaveVT and EnterVT. It needs to be
|
||||
* replaced with a semaphore. It also needs to be
|
||||
* transitioned away from for kernel modesetting.
|
||||
*/
|
||||
int suspended;
|
||||
} mm;
|
||||
|
||||
struct work_struct user_interrupt_task;
|
||||
|
@ -502,8 +511,14 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
|
|||
struct drm_file *file_priv);
|
||||
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int i915_gem_init_object(struct drm_gem_object *obj);
|
||||
void i915_gem_free_object(struct drm_gem_object *obj);
|
||||
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
|
||||
void i915_gem_object_unpin(struct drm_gem_object *obj);
|
||||
int i915_gem_set_domain(struct drm_gem_object *obj,
|
||||
struct drm_file *file_priv,
|
||||
uint32_t read_domains,
|
||||
|
@ -514,6 +529,7 @@ void i915_gem_lastclose(struct drm_device *dev);
|
|||
void i915_gem_retire_requests(struct drm_device *dev);
|
||||
void i915_gem_retire_timeout(unsigned long data);
|
||||
void i915_gem_retire_handler(struct work_struct *work);
|
||||
int i915_gem_init_ringbuffer(struct drm_device *dev);
|
||||
#endif
|
||||
|
||||
extern unsigned int i915_fbpercrtc;
|
||||
|
|
|
@ -109,47 +109,9 @@ int i915_load_modeset_init(struct drm_device *dev)
|
|||
i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
|
||||
printk("setting up %ld bytes of VRAM space\n", prealloc_size);
|
||||
printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
|
||||
|
||||
drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
|
||||
drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT,
|
||||
(agp_size - prealloc_size) >> PAGE_SHIFT, 1);
|
||||
I915_WRITE(PRB0_CTL, 0);
|
||||
I915_WRITE(PRB0_HEAD, 0);
|
||||
I915_WRITE(PRB0_TAIL, 0);
|
||||
|
||||
size = PRIMARY_RINGBUFFER_SIZE;
|
||||
ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
|
||||
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
|
||||
DRM_BO_FLAG_MEM_VRAM |
|
||||
DRM_BO_FLAG_NO_EVICT,
|
||||
DRM_BO_HINT_DONT_FENCE, 0x1, 0,
|
||||
&dev_priv->ring_buffer);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("Unable to allocate or pin ring buffer\n");
|
||||
goto clean_mm;
|
||||
}
|
||||
|
||||
/* remap the buffer object properly */
|
||||
dev_priv->ring.Start = dev_priv->ring_buffer->offset;
|
||||
dev_priv->ring.End = dev_priv->ring.Start + size;
|
||||
dev_priv->ring.Size = size;
|
||||
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
|
||||
|
||||
/* FIXME: need wrapper with PCI mem checks */
|
||||
ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
|
||||
(void **) &dev_priv->ring.virtual_start);
|
||||
if (ret) {
|
||||
DRM_ERROR("error mapping ring buffer: %d\n", ret);
|
||||
goto destroy_ringbuffer;
|
||||
}
|
||||
|
||||
DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
|
||||
dev_priv->ring.virtual_start, dev_priv->ring.Size);
|
||||
|
||||
memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
|
||||
I915_WRITE(PRB0_START, dev_priv->ring.Start);
|
||||
I915_WRITE(PRB0_CTL, ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
|
||||
(RING_NO_REPORT | RING_VALID));
|
||||
ret = i915_gem_init_ringbuffer(dev);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* Allow hardware batchbuffers unless told otherwise.
|
||||
*/
|
||||
|
@ -262,9 +224,7 @@ destroy_ringbuffer:
|
|||
dev_priv->ring.virtual_start);
|
||||
if (dev_priv->ring_buffer)
|
||||
drm_bo_usage_deref_unlocked(&dev_priv->ring_buffer);
|
||||
clean_mm:
|
||||
drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1);
|
||||
drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -670,7 +670,7 @@ void i915_user_irq_off(struct drm_device *dev)
|
|||
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
|
||||
|
||||
DRM_SPINLOCK(&dev_priv->user_irq_lock);
|
||||
BUG_ON(dev_priv->user_irq_refcount <= 0);
|
||||
BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
|
||||
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
|
||||
dev_priv->irq_mask_reg |= I915_USER_INTERRUPT;
|
||||
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
|
||||
|
@ -689,6 +689,11 @@ int i915_wait_irq(struct drm_device * dev, int irq_nr)
|
|||
struct drm_i915_master_private *master_priv;
|
||||
int ret = 0;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
|
||||
READ_BREADCRUMB(dev_priv));
|
||||
|
||||
|
|
Loading…
Reference in New Issue