/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define WATCH_COHERENCY	0
#define WATCH_BUF	0
#define WATCH_EXEC	0
#define WATCH_LRU	0
#define WATCH_RELOC	0

static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
			    uint32_t read_domains,
			    uint32_t write_domain);
int
i915_gem_set_domain(struct drm_gem_object *obj,
		    struct drm_file *file_priv,
		    uint32_t read_domains,
		    uint32_t write_domain);

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	mutex_lock(&dev->struct_mutex);

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
	    args->gtt_end - args->gtt_start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
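
/*
 * For illustration only (not part of the driver): a userspace sketch of
 * the init call above, assuming the DRM_IOCTL_I915_GEM_INIT request
 * number from i915_drm.h and an already-open DRM fd.  The start/end
 * values here are made up; they only need to satisfy the ordering and
 * page-alignment checks performed above.
 *
 *	struct drm_i915_gem_init init;
 *
 *	init.gtt_start = 16 * 1024 * 1024;
 *	init.gtt_end = 256 * 1024 * 1024;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_INIT, &init);
 */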

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int handle, ret;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	ssize_t read;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_set_domain(obj, file_priv,
				  I915_GEM_DOMAIN_CPU, 0);
	if (ret) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	offset = args->offset;

	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
			args->size, &offset);
	if (read != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		if (read < 0)
			return read;
		else
			return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	ssize_t written;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_set_domain(obj, file_priv,
				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	if (ret) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	offset = args->offset;

	written = vfs_write(obj->filp,
			    (char __user *)(uintptr_t) args->data_ptr,
			    args->size, &offset);

	if (written != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		if (written < 0)
			return written;
		else
			return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Called when user space prepares to use an object
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_set_domain(obj, file_priv,
				  args->read_domains, args->write_domain);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
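
/*
 * For illustration only (not part of the driver): how the create and
 * mmap ioctls above combine from userspace.  The ioctl request names
 * are assumed from i915_drm.h; error handling is omitted.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	struct drm_i915_gem_mmap mmap_arg = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	mmap_arg.handle = create.handle;
 *	mmap_arg.size = 4096;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
 *
 * On success, mmap_arg.addr_ptr holds the CPU address of the mapping.
 */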

static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL)
			page_cache_release(obj_priv->page_list[i]);

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
}
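
/*
 * The transitions above, together with i915_gem_retire_request() below,
 * give each object a simple lifecycle across the mm lists (a summary of
 * the code, not a normative spec):
 *
 *	inactive -> active:	when queued for execution (move_to_active)
 *	active -> flushing:	at retire, if a write domain is still dirty
 *	flushing -> inactive:	once a flush covering that domain retires
 *
 * An object holds an extra reference while it is on the active list, so
 * closing its handle cannot free pages the GPU may still be using.
 */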

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(I915_GEM_HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(GFX_OP_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	request->flush_domains = flush_domains;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	if (was_empty)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
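
/*
 * The usual pairing, as seen in i915_gem_object_wait_rendering() and
 * i915_gem_evict_something() later in this file -- emit a request after
 * queuing rendering, then block until the hardware passes it:
 *
 *	seqno = i915_add_request(dev, flush_domains);
 *	if (seqno != 0)
 *		ret = i915_wait_request(dev, seqno);
 */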

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */

uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (request->flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		/* First clear any buffers that were only waiting for a flush
		 * matching the one just retired.
		 */

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if (obj->write_domain & request->flush_domains) {
				obj->write_domain = 0;
				i915_gem_object_move_to_inactive(obj);
			}
		}

	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;
#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0) {
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.flushing_list);
		} else {
			i915_gem_object_move_to_inactive(obj);
		}
	}
}

/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
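
/*
 * The signed-difference trick above stays correct across 32-bit seqno
 * wraparound as long as the two values are within 2^31 of each other.
 * For example, seq1 = 0x00000002 and seq2 = 0xfffffffe gives
 * seq1 - seq2 == 4, so (int32_t)4 >= 0 and seq1 is correctly treated as
 * later even though it is numerically smaller.
 */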

static uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno)) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}

void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		i915_user_irq_on(dev_priv);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno));
		i915_user_irq_off(dev_priv);
	}
	if (ret)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}

static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains|flush_domains) & ~I915_GEM_DOMAIN_CPU) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* If there are writes queued to the buffer, flush and
	 * create a new seqno to wait for.
	 */
	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU)) {
		uint32_t write_domain = obj->write_domain;
#if WATCH_BUF
		DRM_INFO("%s: flushing object %p from write domain %08x\n",
			 __func__, obj, write_domain);
#endif
		i915_gem_flush(dev, 0, write_domain);
		obj->write_domain = 0;

		i915_gem_object_move_to_active(obj);
		obj_priv->last_rendering_seqno = i915_add_request(dev,
								  write_domain);
		BUG_ON(obj_priv->last_rendering_seqno == 0);
#if WATCH_LRU
		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
	}
	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
					 I915_GEM_DOMAIN_CPU);
	if (ret)
		return ret;

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	i915_gem_object_free_page_list(obj);

	drm_memrange_put_block(obj_priv->gtt_space);
	obj_priv->gtt_space = NULL;

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list)) {
		list_del_init(&obj_priv->list);
		if (obj_priv->active) {
			DRM_ERROR("Failed to wait on buffer when unbinding, "
				  "continued anyway.\n");
			obj_priv->active = 0;
			drm_gem_object_unreference(obj);
		}
	}
	return 0;
}

#if WATCH_BUF | WATCH_EXEC
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
		   uint32_t bias, uint32_t mark)
{
	uint32_t *mem = kmap_atomic(page, KM_USER0);
	int i;
	for (i = start; i < end; i += 4)
		DRM_INFO("%08x: %08x%s\n",
			 (int) (bias + i), mem[i / 4],
			 (bias + i == mark) ? " ********" : "");
	kunmap_atomic(mem, KM_USER0);
	/* give syslog time to catch up */
	msleep(1);
}

static void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
		     const char *where, uint32_t mark)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page;

	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
		int page_len, chunk, chunk_len;

		page_len = len - page * PAGE_SIZE;
		if (page_len > PAGE_SIZE)
			page_len = PAGE_SIZE;

		for (chunk = 0; chunk < page_len; chunk += 128) {
			chunk_len = page_len - chunk;
			if (chunk_len > 128)
				chunk_len = 128;
			i915_gem_dump_page(obj_priv->page_list[page],
					   chunk, chunk + chunk_len,
					   obj_priv->gtt_offset +
					   page * PAGE_SIZE,
					   mark);
		}
	}
}
#endif

#if WATCH_LRU
static void
i915_dump_lru(struct drm_device *dev, const char *where)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;

	DRM_INFO("active list %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
			    list)
	{
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
	DRM_INFO("flushing list %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
			    list)
	{
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
	DRM_INFO("inactive %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
}
#endif

static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
	return ret;
}

static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	for (i = 0; i < page_count; i++) {
		obj_priv->page_list[i] =
			find_or_create_page(obj->filp->f_mapping, i, GFP_HIGHUSER);

		if (obj_priv->page_list[i] == NULL) {
			DRM_ERROR("Failed to find_or_create_page()\n");
			i915_gem_object_free_page_list(obj);
			return -ENOMEM;
		}
		unlock_page(obj_priv->page_list[i]);
	}
	return 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_memrange_node *free_space;
	int page_count, ret;

	if (alignment == 0)
		alignment = PAGE_SIZE;
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

search_free:
	free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
					      obj->size,
					      alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space =
			drm_memrange_get_block(free_space, obj->size,
					       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_memrange_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_memrange_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);

	return 0;
}

static void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations.
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
			    uint32_t read_domains,
			    uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;
	int ret;

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x write %08x\n",
		 __func__, obj, read_domains, write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		/*
		 * If we're invalidating the CPU cache and flushing a GPU cache,
		 * then pause for rendering so that the GPU caches will be
		 * flushed before the cpu cache is invalidated
		 */
		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
		    (flush_domains & ~I915_GEM_DOMAIN_CPU)) {
			ret = i915_gem_object_wait_rendering(obj);
			if (ret)
				return ret;
		}
		i915_gem_clflush_object(obj);
	}

	/* Only update obj->write_domain if we're actually changing it
	 * (i.e. a new write domain is set or the old one was flushed);
	 * otherwise a (CPU, 0) set_domain would drop a still-dirty CPU
	 * write domain without any clflush having happened.
	 */
	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;
	obj->read_domains = read_domains;
	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
	return 0;
}
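
/*
 * A worked example of the bookkeeping above, following the "constant
 * buffer" case in the earlier comment block: an object currently at
 * (read_domains = CPU, write_domain = CPU) handed to the GPU with
 * set_domain(RENDER, 0) ends up with
 *
 *	read_domains       = CPU | RENDER  (write_domain == 0 keeps CPU)
 *	flush_domains      = CPU           (old write domain, new reads differ)
 *	invalidate_domains = RENDER        (new read domain)
 *
 * The object is clflushed here, and the accumulated device domains are
 * emitted later by i915_gem_dev_set_domain().
 */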

/**
 * Once all of the objects have been set in the proper domain,
 * perform the necessary flush and invalidate operations.
 *
 * Returns the write domains flushed, for use in flush tracking.
 */
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
	uint32_t flush_domains = dev->flush_domains;

	/*
	 * Now that all the buffers are synced to the proper domains,
	 * flush and invalidate the collected domains
	 */
	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		dev->invalidate_domains = 0;
		dev->flush_domains = 0;
	}

	return flush_domains;
}

#if WATCH_COHERENCY
static void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page;
	uint32_t *gtt_mapping;
	uint32_t *backing_map = NULL;
	int bad_count = 0;

	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
		 __func__, obj, obj_priv->gtt_offset, handle,
		 obj->size / 1024);

	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
			      obj->size);
	if (gtt_mapping == NULL) {
		DRM_ERROR("failed to map GTT space\n");
		return;
	}

	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
		int i;

		backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);

		if (backing_map == NULL) {
			DRM_ERROR("failed to map backing page\n");
			goto out;
		}

		for (i = 0; i < PAGE_SIZE / 4; i++) {
			uint32_t cpuval = backing_map[i];
			uint32_t gttval = readl(gtt_mapping +
						page * 1024 + i);

			if (cpuval != gttval) {
				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
					 "0x%08x vs 0x%08x\n",
					 (int)(obj_priv->gtt_offset +
					       page * PAGE_SIZE + i * 4),
					 cpuval, gttval);
				if (bad_count++ >= 8) {
					DRM_INFO("...\n");
					goto out;
				}
			}
		}
		kunmap_atomic(backing_map, KM_USER0);
		backing_map = NULL;
	}

 out:
	if (backing_map != NULL)
		kunmap_atomic(backing_map, KM_USER0);
	iounmap(gtt_mapping);

	/* give syslog time to catch up */
	msleep(1);

	/* Directly flush the object, since we just loaded values with the CPU
	 * from the backing pages and we don't want to disturb the cache
	 * management that we're trying to observe.
	 */

	i915_gem_clflush_object(obj);
}
#endif

/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	uint32_t last_reloc_offset = -1;
	void *reloc_page = NULL;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset, *reloc_entry;
		int ret;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Now that we're going to actually write some data in,
		 * make sure that any rendering using this buffer's contents
		 * is completed.
		 */
		i915_gem_object_wait_rendering(obj);

		/* As we're writing through the gtt, flush
		 * any CPU writes before we write the relocations
		 */
		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			drm_agp_chipset_flush(dev);
			obj->write_domain = 0;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		if (reloc_page == NULL ||
		    (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
		    (reloc_offset & ~(PAGE_SIZE - 1))) {
			if (reloc_page != NULL)
				iounmap(reloc_page);

			reloc_page = ioremap(dev->agp->base +
					     (reloc_offset & ~(PAGE_SIZE - 1)),
					     PAGE_SIZE);
			last_reloc_offset = reloc_offset;
			if (reloc_page == NULL) {
				drm_gem_object_unreference(target_obj);
				i915_gem_object_unpin(obj);
				return -ENOMEM;
			}
		}

		reloc_entry = (uint32_t *)((char *)reloc_page +
					   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return ret;
		}

		drm_gem_object_unreference(target_obj);
|
2008-05-01 16:22:21 -06:00
|
|
|
}
|
|
|
|
|
2008-05-06 12:18:47 -06:00
|
|
|
if (reloc_page != NULL)
|
|
|
|
iounmap(reloc_page);
|
|
|
|
|
2008-05-05 23:10:02 -06:00
|
|
|
#if WATCH_BUF
|
2008-05-30 14:47:34 -06:00
|
|
|
if (0)
|
|
|
|
i915_gem_dump_object(obj, 128, __func__, ~0);
|
2008-05-05 23:10:02 -06:00
|
|
|
#endif
|
2008-05-01 15:20:44 -06:00
|
|
|
return 0;
|
2008-05-01 12:39:06 -06:00
|
|
|
}
|
|
|
|
|
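/*
 * Illustrative sketch (not part of the driver): the core of a single
 * relocation write from the loop above, assuming the target object is
 * bound, the offset has been range- and alignment-checked, and the
 * containing GTT page is already mapped.  The helper name and this
 * factoring are hypothetical.
 */
#if 0
static void
i915_gem_apply_one_reloc(void *reloc_page, uint32_t reloc_offset,
			 uint32_t target_gtt_offset, uint32_t delta)
{
	uint32_t *reloc_entry;

	/* Locate the DWORD to patch within the mapped page. */
	reloc_entry = (uint32_t *)((char *)reloc_page +
				   (reloc_offset & (PAGE_SIZE - 1)));

	/* The patched value is the target's GTT address plus the delta
	 * the client computed against its presumed offset.
	 */
	writel(target_gtt_offset + delta, reloc_entry);
}
#endif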
2008-06-02 13:37:10 -06:00
|
|
|
/** Dispatch a batchbuffer to the ring
|
|
|
|
*/
|
2008-05-05 11:51:49 -06:00
|
|
|
static int
|
2008-05-08 12:45:53 -06:00
|
|
|
i915_dispatch_gem_execbuffer(struct drm_device *dev,
|
|
|
|
struct drm_i915_gem_execbuffer *exec,
|
2008-05-05 12:27:06 -06:00
|
|
|
uint64_t exec_offset)
|
2008-05-05 11:51:49 -06:00
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2008-05-08 12:45:53 -06:00
|
|
|
struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
|
|
|
|
(uintptr_t) exec->cliprects_ptr;
|
2008-05-05 11:51:49 -06:00
|
|
|
int nbox = exec->num_cliprects;
|
|
|
|
int i = 0, count;
|
2008-05-05 12:27:06 -06:00
|
|
|
uint32_t exec_start, exec_len;
|
2008-05-05 11:51:49 -06:00
|
|
|
RING_LOCALS;
|
|
|
|
|
2008-05-05 12:27:06 -06:00
|
|
|
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
|
|
|
|
exec_len = (uint32_t) exec->batch_len;
|
2008-05-08 12:45:53 -06:00
|
|
|
|
2008-05-05 12:27:06 -06:00
|
|
|
if ((exec_start | exec_len) & 0x7) {
|
2008-05-05 11:51:49 -06:00
|
|
|
DRM_ERROR("alignment\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2008-05-05 12:27:06 -06:00
|
|
|
if (!exec_start)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2008-05-05 11:51:49 -06:00
|
|
|
count = nbox ? nbox : 1;
|
|
|
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
if (i < nbox) {
|
|
|
|
int ret = i915_emit_box(dev, boxes, i,
|
|
|
|
exec->DR1, exec->DR4);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-06-06 14:26:03 -06:00
|
|
|
if (IS_I830(dev) || IS_845G(dev)) {
|
|
|
|
BEGIN_LP_RING(4);
|
|
|
|
OUT_RING(MI_BATCH_BUFFER);
|
|
|
|
OUT_RING(exec_start | MI_BATCH_NON_SECURE);
|
|
|
|
OUT_RING(exec_start + exec_len - 4);
|
|
|
|
OUT_RING(0);
|
|
|
|
ADVANCE_LP_RING();
|
|
|
|
} else {
|
2008-05-05 11:51:49 -06:00
|
|
|
BEGIN_LP_RING(2);
|
|
|
|
if (IS_I965G(dev)) {
|
2008-05-08 12:45:53 -06:00
|
|
|
OUT_RING(MI_BATCH_BUFFER_START |
|
|
|
|
(2 << 6) |
|
|
|
|
MI_BATCH_NON_SECURE_I965);
|
2008-05-05 12:27:06 -06:00
|
|
|
OUT_RING(exec_start);
|
2008-05-05 11:51:49 -06:00
|
|
|
} else {
|
2008-05-08 12:45:53 -06:00
|
|
|
OUT_RING(MI_BATCH_BUFFER_START |
|
|
|
|
(2 << 6));
|
2008-05-05 12:27:06 -06:00
|
|
|
OUT_RING(exec_start | MI_BATCH_NON_SECURE);
|
2008-05-05 11:51:49 -06:00
|
|
|
}
|
|
|
|
ADVANCE_LP_RING();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* XXX breadcrumb */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
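/*
 * Worked example for the checks above (illustrative): with
 * exec_offset = 0x100000 and batch_start_offset = 0x40, exec_start is
 * 0x100040; ((exec_start | exec_len) & 0x7) then passes only if
 * batch_len is also a multiple of 8.  On 830/845 the inline
 * MI_BATCH_BUFFER form takes explicit start and end addresses, where
 * the end is the address of the last DWORD (exec_start + exec_len - 4);
 * later parts chain to the buffer with MI_BATCH_BUFFER_START instead.
 */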
2008-05-20 17:27:05 -06:00
|
|
|
/* Throttle our rendering by waiting until the ring has completed our requests
|
|
|
|
* emitted over 20 msec ago.
|
|
|
|
*
|
|
|
|
* This should get us reasonable parallelism between CPU and GPU but also
|
|
|
|
* relatively low latency when blocking on a particular request to finish.
|
2008-05-12 14:04:18 -06:00
|
|
|
*/
|
|
|
|
static int
|
2008-06-13 13:06:13 -06:00
|
|
|
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
|
2008-05-12 14:04:18 -06:00
|
|
|
{
|
2008-06-13 13:06:13 -06:00
|
|
|
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
|
2008-05-12 14:04:18 -06:00
|
|
|
int ret = 0;
|
2008-06-13 13:06:13 -06:00
|
|
|
uint32_t seqno;
|
2008-05-12 14:04:18 -06:00
|
|
|
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_lock(&dev->struct_mutex);
|
2008-06-13 13:06:13 -06:00
|
|
|
seqno = i915_file_priv->mm.last_gem_throttle_seqno;
|
|
|
|
i915_file_priv->mm.last_gem_throttle_seqno =
	i915_file_priv->mm.last_gem_seqno;
|
|
|
|
if (seqno)
|
|
|
|
ret = i915_wait_request(dev, seqno);
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-05-12 14:04:18 -06:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
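/*
 * Example timeline for the throttle above (illustrative):
 *
 *	execbuf A	-> last_gem_seqno = 10
 *	throttle #1	-> saves 10, waits on 0 (nothing outstanding)
 *	execbuf B	-> last_gem_seqno = 20
 *	throttle #2	-> saves 20, waits on 10 (A's request)
 *
 * Each call blocks on the request recorded by the previous call, so a
 * client that throttles once per frame keeps about one frame of
 * rendering in flight.
 */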
2008-05-01 12:39:06 -06:00
|
|
|
int
|
|
|
|
i915_gem_execbuffer(struct drm_device *dev, void *data,
|
2008-05-01 15:20:44 -06:00
|
|
|
struct drm_file *file_priv)
|
2008-05-01 12:39:06 -06:00
|
|
|
{
|
2008-06-10 16:30:23 -06:00
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2008-06-13 13:06:13 -06:00
|
|
|
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
|
2008-05-01 12:39:06 -06:00
|
|
|
struct drm_i915_gem_execbuffer *args = data;
|
2008-06-02 13:37:10 -06:00
|
|
|
struct drm_i915_gem_exec_object *exec_list = NULL;
|
2008-05-12 14:04:18 -06:00
|
|
|
struct drm_gem_object **object_list = NULL;
|
2008-05-08 12:45:53 -06:00
|
|
|
struct drm_gem_object *batch_obj;
|
2008-06-13 15:28:18 -06:00
|
|
|
int ret, i, pinned = 0;
|
2008-05-05 12:27:06 -06:00
|
|
|
uint64_t exec_offset;
|
2008-05-14 16:13:14 -06:00
|
|
|
uint32_t seqno, flush_domains;
|
2008-05-01 12:39:06 -06:00
|
|
|
|
2008-05-05 23:10:02 -06:00
|
|
|
#if WATCH_EXEC
|
2008-05-08 12:45:53 -06:00
|
|
|
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
|
2008-05-05 23:10:02 -06:00
|
|
|
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
|
2008-05-05 15:32:01 -06:00
|
|
|
#endif
|
2008-05-02 18:50:46 -06:00
|
|
|
|
2008-06-02 13:37:10 -06:00
|
|
|
/* Copy in the exec list from userland */
|
|
|
|
exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
|
|
|
|
DRM_MEM_DRIVER);
|
2008-05-01 15:20:44 -06:00
|
|
|
object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
|
|
|
|
DRM_MEM_DRIVER);
|
2008-06-02 13:37:10 -06:00
|
|
|
if (exec_list == NULL || object_list == NULL) {
|
|
|
|
DRM_ERROR("Failed to allocate exec or object list "
|
2008-05-08 12:45:53 -06:00
|
|
|
"for %d buffers\n",
|
|
|
|
args->buffer_count);
|
2008-05-01 15:20:44 -06:00
|
|
|
ret = -ENOMEM;
|
2008-06-02 13:37:10 -06:00
|
|
|
goto pre_mutex_err;
|
2008-05-01 15:20:44 -06:00
|
|
|
}
|
2008-06-02 13:37:10 -06:00
|
|
|
ret = copy_from_user(exec_list,
|
2008-05-08 12:45:53 -06:00
|
|
|
(struct drm_i915_gem_exec_object __user *)
|
|
|
|
(uintptr_t) args->buffers_ptr,
|
2008-06-02 13:37:10 -06:00
|
|
|
sizeof(*exec_list) * args->buffer_count);
|
2008-05-05 11:51:49 -06:00
|
|
|
if (ret != 0) {
|
2008-06-02 13:37:10 -06:00
|
|
|
DRM_ERROR("copy %d exec entries failed %d\n",
|
2008-05-08 12:45:53 -06:00
|
|
|
args->buffer_count, ret);
|
2008-06-02 13:37:10 -06:00
|
|
|
goto pre_mutex_err;
|
2008-05-05 11:51:49 -06:00
|
|
|
}
|
2008-05-01 12:39:06 -06:00
|
|
|
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_lock(&dev->struct_mutex);
|
2008-06-02 11:59:15 -06:00
|
|
|
|
2008-06-10 16:30:23 -06:00
|
|
|
if (dev_priv->mm.suspended) {
|
|
|
|
DRM_ERROR("Execbuf while VT-switched.\n");
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
2008-06-02 11:59:15 -06:00
|
|
|
/* Zero the global flush/invalidate flags. These
|
|
|
|
* will be modified as each object is bound to the
|
|
|
|
* GTT.
|
|
|
|
*/
|
|
|
|
dev->invalidate_domains = 0;
|
|
|
|
dev->flush_domains = 0;
|
|
|
|
|
2008-05-01 15:20:44 -06:00
|
|
|
/* Look up object handles and perform the relocations */
|
2008-05-01 12:39:06 -06:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
2008-05-01 15:20:44 -06:00
|
|
|
object_list[i] = drm_gem_object_lookup(dev, file_priv,
|
2008-06-02 13:37:10 -06:00
|
|
|
exec_list[i].handle);
|
2008-05-01 15:20:44 -06:00
|
|
|
if (object_list[i] == NULL) {
|
2008-05-08 12:45:53 -06:00
|
|
|
DRM_ERROR("Invalid object handle %d at index %d\n",
|
2008-06-02 13:37:10 -06:00
|
|
|
exec_list[i].handle, i);
|
2008-05-01 15:20:44 -06:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2008-06-02 11:59:15 -06:00
|
|
|
object_list[i]->pending_read_domains = 0;
|
|
|
|
object_list[i]->pending_write_domain = 0;
|
2008-06-13 15:28:18 -06:00
|
|
|
ret = i915_gem_object_pin_and_relocate(object_list[i],
|
|
|
|
file_priv,
|
|
|
|
&exec_list[i]);
|
2008-05-05 12:27:06 -06:00
|
|
|
if (ret) {
|
2008-06-02 13:37:10 -06:00
|
|
|
DRM_ERROR("object bind and relocate failed %d\n", ret);
|
2008-05-05 12:27:06 -06:00
|
|
|
goto err;
|
|
|
|
}
|
2008-06-13 16:38:13 -06:00
|
|
|
pinned = i + 1;
|
2008-05-01 12:39:06 -06:00
|
|
|
}
|
|
|
|
|
2008-05-08 11:44:02 -06:00
|
|
|
/* Set the pending read domains for the batch buffer to COMMAND */
|
2008-05-08 12:45:53 -06:00
|
|
|
batch_obj = object_list[args->buffer_count-1];
|
2008-06-11 15:42:40 -06:00
|
|
|
batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
|
2008-05-08 12:45:53 -06:00
|
|
|
batch_obj->pending_write_domain = 0;
|
2008-05-08 11:44:02 -06:00
|
|
|
|
2008-05-06 14:28:26 -06:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
struct drm_gem_object *obj = object_list[i];
|
|
|
|
struct drm_i915_gem_object *obj_priv = obj->driver_private;
|
|
|
|
|
|
|
|
if (obj_priv->gtt_space == NULL) {
|
|
|
|
/* We evicted the buffer in the process of validating
|
|
|
|
* our set of buffers. We could try to recover by
|
|
|
|
* kicking everything out and trying again from
|
|
|
|
* the start.
|
|
|
|
*/
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2008-05-08 11:44:02 -06:00
|
|
|
/* make sure all previous memory operations have passed */
|
2008-06-02 11:59:15 -06:00
|
|
|
ret = i915_gem_object_set_domain(obj,
|
|
|
|
obj->pending_read_domains,
|
|
|
|
obj->pending_write_domain);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
2008-05-06 21:00:23 -06:00
|
|
|
}
|
|
|
|
|
2008-05-08 11:44:02 -06:00
|
|
|
/* Flush/invalidate caches and chipset buffer */
|
2008-05-14 16:13:14 -06:00
|
|
|
flush_domains = i915_gem_dev_set_domain(dev);
|
2008-05-05 12:27:06 -06:00
|
|
|
|
2008-05-30 11:04:22 -06:00
|
|
|
#if WATCH_COHERENCY
|
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
i915_gem_object_check_coherency(object_list[i],
|
2008-06-02 13:37:10 -06:00
|
|
|
exec_list[i].handle);
|
2008-05-30 11:04:22 -06:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-06-02 13:37:10 -06:00
|
|
|
exec_offset = exec_list[args->buffer_count - 1].offset;
|
2008-05-05 12:27:06 -06:00
|
|
|
|
2008-05-05 23:10:02 -06:00
|
|
|
#if WATCH_EXEC
|
2008-05-08 12:45:53 -06:00
|
|
|
i915_gem_dump_object(object_list[args->buffer_count - 1],
|
2008-05-05 23:10:02 -06:00
|
|
|
args->batch_len,
|
2008-05-08 12:45:53 -06:00
|
|
|
__func__,
|
2008-05-05 23:10:02 -06:00
|
|
|
~0);
|
|
|
|
#endif
|
2008-05-08 12:45:53 -06:00
|
|
|
|
2008-05-01 12:39:06 -06:00
|
|
|
/* Exec the batchbuffer */
|
2008-05-08 12:45:53 -06:00
|
|
|
ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
|
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("dispatch failed %d\n", ret);
|
2008-05-05 11:51:49 -06:00
|
|
|
goto err;
|
|
|
|
}
|
2008-05-01 12:39:06 -06:00
|
|
|
|
2008-05-22 11:59:59 -06:00
|
|
|
/*
|
|
|
|
* Ensure that the commands in the batch buffer are
|
|
|
|
* finished before the interrupt fires
|
|
|
|
*/
|
|
|
|
flush_domains |= i915_retire_commands(dev);
|
|
|
|
|
2008-05-08 12:45:53 -06:00
|
|
|
/*
|
2008-05-20 11:52:24 -06:00
|
|
|
* Get a seqno representing the execution of the current buffer,
|
2008-05-08 12:45:53 -06:00
|
|
|
* which we can wait on. We would like to mitigate these interrupts,
|
2008-05-20 11:52:24 -06:00
|
|
|
* likely by only creating seqnos occasionally (so that we have
|
2008-05-08 12:45:53 -06:00
|
|
|
* *some* interrupts representing completion of buffers that we can
|
|
|
|
* wait on when trying to clear up gtt space).
|
2008-05-06 14:28:26 -06:00
|
|
|
*/
|
2008-05-14 16:13:14 -06:00
|
|
|
seqno = i915_add_request(dev, flush_domains);
|
2008-05-20 11:52:24 -06:00
|
|
|
BUG_ON(seqno == 0);
|
2008-06-13 13:06:13 -06:00
|
|
|
i915_file_priv->mm.last_gem_seqno = seqno;
|
2008-05-06 14:28:26 -06:00
|
|
|
for (i = 0; i < args->buffer_count; i++) {
|
|
|
|
struct drm_gem_object *obj = object_list[i];
|
|
|
|
struct drm_i915_gem_object *obj_priv = obj->driver_private;
|
|
|
|
|
2008-05-20 11:48:36 -06:00
|
|
|
i915_gem_object_move_to_active(obj);
|
2008-05-20 11:52:24 -06:00
|
|
|
obj_priv->last_rendering_seqno = seqno;
|
2008-05-09 15:34:20 -06:00
|
|
|
#if WATCH_LRU
|
|
|
|
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
|
|
|
|
#endif
|
2008-05-06 14:28:26 -06:00
|
|
|
}
|
2008-05-27 17:49:49 -06:00
|
|
|
#if WATCH_LRU
|
2008-05-09 15:34:20 -06:00
|
|
|
i915_dump_lru(dev, __func__);
|
|
|
|
#endif
|
2008-05-06 14:28:26 -06:00
|
|
|
|
2008-06-02 13:37:10 -06:00
|
|
|
/* Copy the new buffer offsets back to the user's exec list. */
|
2008-05-08 12:45:53 -06:00
|
|
|
ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
|
|
|
|
(uintptr_t) args->buffers_ptr,
|
2008-06-02 13:37:10 -06:00
|
|
|
exec_list,
|
|
|
|
sizeof(*exec_list) * args->buffer_count);
|
2008-05-05 11:51:49 -06:00
|
|
|
if (ret)
|
2008-06-02 13:37:10 -06:00
|
|
|
DRM_ERROR("failed to copy %d exec entries "
|
2008-05-08 12:45:53 -06:00
|
|
|
"back to user (%d)\n",
|
2008-05-05 11:51:49 -06:00
|
|
|
args->buffer_count, ret);
|
2008-05-01 15:20:44 -06:00
|
|
|
err:
|
|
|
|
if (object_list != NULL) {
|
2008-06-13 15:28:18 -06:00
|
|
|
for (i = 0; i < pinned; i++)
|
2008-06-13 16:38:13 -06:00
|
|
|
i915_gem_object_unpin(object_list[i]);
|
|
|
|
|
2008-05-01 15:20:44 -06:00
|
|
|
for (i = 0; i < args->buffer_count; i++)
|
2008-05-02 11:36:00 -06:00
|
|
|
drm_gem_object_unreference(object_list[i]);
|
2008-05-01 15:20:44 -06:00
|
|
|
}
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
2008-06-02 13:37:10 -06:00
|
|
|
pre_mutex_err:
|
2008-05-01 15:20:44 -06:00
|
|
|
drm_free(object_list, sizeof(*object_list) * args->buffer_count,
|
|
|
|
DRM_MEM_DRIVER);
|
2008-06-02 13:37:10 -06:00
|
|
|
drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
|
2008-05-01 12:39:06 -06:00
|
|
|
DRM_MEM_DRIVER);
|
|
|
|
|
2008-05-01 15:20:44 -06:00
|
|
|
return ret;
|
2008-05-01 12:39:06 -06:00
|
|
|
}
|
|
|
|
|
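/*
 * Sketch of the execbuffer contract implemented above: copy in the
 * exec list, pin and relocate each object (the batch buffer must be
 * last), resolve each object's pending domains, flush whatever
 * i915_gem_dev_set_domain() accumulated, dispatch the batch, emit a
 * request so completion can be tracked, and finally copy the updated
 * offsets back so the client can seed presumed_offset next time.
 */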
2008-06-10 16:30:23 -06:00
|
|
|
int
|
|
|
|
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_object *obj_priv = obj->driver_private;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (obj_priv->gtt_space == NULL) {
|
|
|
|
ret = i915_gem_object_bind_to_gtt(obj, alignment);
|
|
|
|
if (ret != 0) {
|
2008-06-13 15:28:18 -06:00
|
|
|
DRM_ERROR("Failure to bind: %d", ret);
|
2008-06-10 16:30:23 -06:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
obj_priv->pin_count++;
|
2008-06-13 15:28:18 -06:00
|
|
|
|
|
|
|
/* If the object is not active and not pending a flush,
|
|
|
|
* remove it from the inactive list
|
|
|
|
*/
|
|
|
|
if (obj_priv->pin_count == 1 &&
|
|
|
|
!obj_priv->active &&
|
|
|
|
obj->write_domain == 0)
|
|
|
|
list_del_init(&obj_priv->list);
|
|
|
|
|
2008-06-10 16:30:23 -06:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
i915_gem_object_unpin(struct drm_gem_object *obj)
|
|
|
|
{
|
2008-06-13 15:28:18 -06:00
|
|
|
struct drm_device *dev = obj->dev;
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
2008-06-10 16:30:23 -06:00
|
|
|
struct drm_i915_gem_object *obj_priv = obj->driver_private;
|
|
|
|
|
|
|
|
obj_priv->pin_count--;
|
2008-06-13 15:28:18 -06:00
|
|
|
BUG_ON(obj_priv->pin_count < 0);
|
|
|
|
BUG_ON(obj_priv->gtt_space == NULL);
|
|
|
|
|
|
|
|
/* If the object is no longer pinned, and is
|
|
|
|
* neither active nor being flushed, then stick it on
|
|
|
|
* the inactive list
|
|
|
|
*/
|
|
|
|
if (obj_priv->pin_count == 0 &&
|
|
|
|
!obj_priv->active && obj->write_domain == 0)
|
|
|
|
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
|
2008-06-10 16:30:23 -06:00
|
|
|
}
|
|
|
|
|
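/*
 * Typical pairing (sketch): pin an object only while its GTT offset
 * must stay stable, then unpin it so it becomes evictable again.
 *
 *	ret = i915_gem_object_pin(obj, 4096);
 *	if (ret == 0) {
 *		... use obj_priv->gtt_offset ...
 *		i915_gem_object_unpin(obj);
 *	}
 */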
2008-05-01 16:40:02 -06:00
|
|
|
int
|
2008-05-01 17:27:03 -06:00
|
|
|
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
2008-05-01 16:40:02 -06:00
|
|
|
{
|
|
|
|
struct drm_i915_gem_pin *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
int ret;
|
|
|
|
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_lock(&dev->struct_mutex);
|
|
|
|
|
2008-05-01 16:40:02 -06:00
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
2008-05-01 18:31:57 -06:00
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
|
|
|
|
args->handle);
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-05-01 16:40:02 -06:00
|
|
|
return -EINVAL;
|
2008-05-01 18:31:57 -06:00
|
|
|
}
|
2008-05-02 17:34:16 -06:00
|
|
|
obj_priv = obj->driver_private;
|
2008-06-10 16:30:23 -06:00
|
|
|
|
|
|
|
ret = i915_gem_object_pin(obj, args->alignment);
|
|
|
|
if (ret != 0) {
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return ret;
|
2008-05-01 18:31:57 -06:00
|
|
|
}
|
2008-05-01 16:40:02 -06:00
|
|
|
|
|
|
|
args->offset = obj_priv->gtt_offset;
|
2008-05-08 12:45:53 -06:00
|
|
|
drm_gem_object_unreference(obj);
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-05-01 16:40:02 -06:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2008-05-01 17:27:03 -06:00
|
|
|
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
2008-05-01 16:40:02 -06:00
|
|
|
{
|
|
|
|
struct drm_i915_gem_pin *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_lock(&dev->struct_mutex);
|
2008-05-21 01:32:02 -06:00
|
|
|
|
2008-05-01 16:40:02 -06:00
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
2008-05-01 18:31:57 -06:00
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
|
|
|
|
args->handle);
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-05-01 16:40:02 -06:00
|
|
|
return -EINVAL;
|
2008-05-01 18:31:57 -06:00
|
|
|
}
|
2008-05-01 16:40:02 -06:00
|
|
|
|
2008-06-10 16:30:23 -06:00
|
|
|
i915_gem_object_unpin(obj);
|
|
|
|
|
2008-05-08 12:45:53 -06:00
|
|
|
drm_gem_object_unreference(obj);
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-05-01 16:40:02 -06:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-05-25 21:45:20 -06:00
|
|
|
int
|
|
|
|
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_busy *args = data;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
|
|
|
|
mutex_lock(&dev->struct_mutex);
|
|
|
|
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
|
|
|
|
args->handle);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
obj_priv = obj->driver_private;
|
|
|
|
args->busy = obj_priv->active;
|
2008-06-11 16:51:17 -06:00
|
|
|
|
2008-05-25 21:45:20 -06:00
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-06-06 13:57:01 -06:00
|
|
|
int
|
|
|
|
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
2008-06-13 13:06:13 -06:00
|
|
|
return i915_gem_ring_throttle(dev, file_priv);
|
2008-06-06 13:57:01 -06:00
|
|
|
}
|
|
|
|
|
2008-05-02 11:36:00 -06:00
|
|
|
int i915_gem_init_object(struct drm_gem_object *obj)
|
2008-05-01 12:39:06 -06:00
|
|
|
{
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
|
|
|
|
obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
|
|
|
|
if (obj_priv == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2008-06-11 15:42:40 -06:00
|
|
|
/*
|
|
|
|
* We've just allocated pages from the kernel,
|
|
|
|
* so they've just been written by the CPU with
|
|
|
|
* zeros. They'll need to be clflushed before we
|
|
|
|
* use them with the GPU.
|
|
|
|
*/
|
|
|
|
obj->write_domain = I915_GEM_DOMAIN_CPU;
|
|
|
|
obj->read_domains = I915_GEM_DOMAIN_CPU;
|
|
|
|
|
2008-05-01 12:39:06 -06:00
|
|
|
obj->driver_private = obj_priv;
|
2008-05-06 22:59:06 -06:00
|
|
|
obj_priv->obj = obj;
|
2008-05-15 10:37:49 -06:00
|
|
|
INIT_LIST_HEAD(&obj_priv->list);
|
2008-05-01 12:39:06 -06:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
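/*
 * Example of why the CPU write domain must be tracked from creation,
 * as (read, write) domains over a typical buffer lifetime:
 *
 *	create			-> (CPU, CPU)
 *	set_domain (CPU, CPU)	-> (CPU, CPU)
 *	set_domain (CPU, 0)	-> (CPU, CPU)	write domain kept
 *	execbuf    (GPU, 0)	-> (CPU+GPU, 0)	clflush happens here
 *
 * If set_domain dropped the write domain without flushing, the execbuf
 * step would see no dirty CPU data and the GPU would read stale pages.
 */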
2008-05-02 11:36:00 -06:00
|
|
|
void i915_gem_free_object(struct drm_gem_object *obj)
|
2008-05-01 12:39:06 -06:00
|
|
|
{
|
2008-05-02 11:36:00 -06:00
|
|
|
i915_gem_object_unbind(obj);
|
2008-05-01 18:31:57 -06:00
|
|
|
|
2008-05-01 12:39:06 -06:00
|
|
|
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
|
|
|
|
}
|
2008-05-08 11:44:02 -06:00
|
|
|
|
|
|
|
int
|
2008-05-15 12:21:11 -06:00
|
|
|
i915_gem_set_domain(struct drm_gem_object *obj,
|
2008-05-27 18:50:39 -06:00
|
|
|
struct drm_file *file_priv,
|
2008-05-15 12:21:11 -06:00
|
|
|
uint32_t read_domains,
|
|
|
|
uint32_t write_domain)
|
2008-05-08 11:44:02 -06:00
|
|
|
{
|
2008-05-15 12:21:11 -06:00
|
|
|
struct drm_device *dev = obj->dev;
|
2008-06-02 11:59:15 -06:00
|
|
|
int ret;
|
2008-05-15 12:21:11 -06:00
|
|
|
|
|
|
|
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
|
|
|
|
|
2008-06-02 11:59:15 -06:00
|
|
|
ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
|
2008-06-11 17:19:23 -06:00
|
|
|
if (ret)
|
2008-06-02 11:59:15 -06:00
|
|
|
return ret;
|
2008-05-08 12:45:53 -06:00
|
|
|
i915_gem_dev_set_domain(obj->dev);
|
2008-06-11 17:19:23 -06:00
|
|
|
|
2008-05-08 11:44:02 -06:00
|
|
|
return 0;
|
|
|
|
}
|
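/*
 * Note on the two levels above (sketch): i915_gem_object_set_domain()
 * only accumulates per-object work into dev->invalidate_domains and
 * dev->flush_domains; i915_gem_dev_set_domain() performs the actual
 * flushes, which is why callers that touch several objects invoke it
 * once at the end.
 */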
2008-05-09 15:34:20 -06:00
|
|
|
|
|
|
|
void
|
|
|
|
i915_gem_lastclose(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
|
2008-05-15 12:21:11 -06:00
|
|
|
mutex_lock(&dev->struct_mutex);
|
|
|
|
|
2008-05-09 15:34:20 -06:00
|
|
|
/* Assume that the chip has been idled at this point. Just pull them
|
|
|
|
* off the execution list and unref them. Since this is the last
|
|
|
|
* close, this is also the last ref and they'll go away.
|
|
|
|
*/
|
|
|
|
|
2008-05-15 10:37:49 -06:00
|
|
|
while (!list_empty(&dev_priv->mm.active_list)) {
|
2008-05-09 15:34:20 -06:00
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
|
2008-05-15 10:37:49 -06:00
|
|
|
obj_priv = list_first_entry(&dev_priv->mm.active_list,
|
2008-05-09 15:34:20 -06:00
|
|
|
struct drm_i915_gem_object,
|
2008-05-15 10:37:49 -06:00
|
|
|
list);
|
2008-05-09 15:34:20 -06:00
|
|
|
|
2008-05-15 10:37:49 -06:00
|
|
|
list_del_init(&obj_priv->list);
|
2008-05-20 11:48:36 -06:00
|
|
|
obj_priv->active = 0;
|
2008-05-09 15:34:20 -06:00
|
|
|
obj_priv->obj->write_domain = 0;
|
|
|
|
drm_gem_object_unreference(obj_priv->obj);
|
|
|
|
}
|
2008-05-15 12:21:11 -06:00
|
|
|
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
2008-05-09 15:34:20 -06:00
|
|
|
}
|
2008-06-10 16:30:23 -06:00
|
|
|
|
|
|
|
static int
|
|
|
|
i915_gem_init_ringbuffer(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
obj = drm_gem_object_alloc(dev, 128 * 1024);
|
|
|
|
if (obj == NULL) {
|
|
|
|
DRM_ERROR("Failed to allocate ringbuffer\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
obj_priv = obj->driver_private;
|
|
|
|
|
|
|
|
ret = i915_gem_object_pin(obj, 4096);
|
2008-06-13 15:28:18 -06:00
|
|
|
if (ret != 0) {
|
|
|
|
drm_gem_object_unreference(obj);
|
2008-06-10 16:30:23 -06:00
|
|
|
return ret;
|
2008-06-13 15:28:18 -06:00
|
|
|
}
|
2008-06-10 16:30:23 -06:00
|
|
|
|
|
|
|
/* Set up the kernel mapping for the ring. */
|
|
|
|
dev_priv->ring.Size = obj->size;
|
|
|
|
dev_priv->ring.tail_mask = obj->size - 1;
|
|
|
|
|
|
|
|
dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
|
|
|
|
dev_priv->ring.map.size = obj->size;
|
|
|
|
dev_priv->ring.map.type = 0;
|
|
|
|
dev_priv->ring.map.flags = 0;
|
|
|
|
dev_priv->ring.map.mtrr = 0;
|
|
|
|
|
|
|
|
drm_core_ioremap(&dev_priv->ring.map, dev);
|
|
|
|
if (dev_priv->ring.map.handle == NULL) {
|
|
|
|
DRM_ERROR("Failed to map ringbuffer.\n");
|
|
|
|
memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
|
|
|
|
drm_gem_object_unreference(obj);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
dev_priv->ring.ring_obj = obj;
|
|
|
|
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
|
|
|
|
|
|
|
|
/* Stop the ring if it's running. */
|
|
|
|
I915_WRITE(LP_RING + RING_LEN, 0);
|
|
|
|
I915_WRITE(LP_RING + RING_HEAD, 0);
|
|
|
|
I915_WRITE(LP_RING + RING_TAIL, 0);
|
|
|
|
I915_WRITE(LP_RING + RING_START, 0);
|
|
|
|
|
|
|
|
/* Initialize the ring. */
|
|
|
|
I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
|
|
|
|
I915_WRITE(LP_RING + RING_LEN,
|
|
|
|
((obj->size - 4096) & RING_NR_PAGES) |
|
|
|
|
RING_NO_REPORT |
|
|
|
|
RING_VALID);
|
|
|
|
|
|
|
|
/* Update our cache of the ring state */
|
|
|
|
i915_kernel_lost_context(dev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
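/*
 * Register programming example (illustrative): for the 128KB object
 * above, RING_LEN is written with (obj->size - 4096) masked by
 * RING_NR_PAGES, i.e. the ring size in pages minus one, which is the
 * encoding the hardware expects, together with RING_NO_REPORT and
 * RING_VALID to enable the ring without head reporting.
 */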
static void
|
|
|
|
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
|
|
|
|
if (dev_priv->ring.ring_obj == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
drm_core_ioremapfree(&dev_priv->ring.map, dev);
|
|
|
|
|
|
|
|
i915_gem_object_unpin(dev_priv->ring.ring_obj);
|
|
|
|
drm_gem_object_unreference(dev_priv->ring.ring_obj);
|
|
|
|
|
|
|
|
memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = i915_gem_init_ringbuffer(dev);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
mutex_lock(&dev->struct_mutex);
|
|
|
|
dev_priv->mm.suspended = 0;
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Unbinds all objects that are on the given buffer list. */
|
|
|
|
static int
|
|
|
|
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct drm_i915_gem_object *obj_priv;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
while (!list_empty(head)) {
|
|
|
|
obj_priv = list_first_entry(head,
|
|
|
|
struct drm_i915_gem_object,
|
|
|
|
list);
|
|
|
|
obj = obj_priv->obj;
|
|
|
|
|
|
|
|
if (obj_priv->pin_count != 0) {
|
|
|
|
DRM_ERROR("Pinned object in unbind list\n");
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = i915_gem_object_unbind(obj);
|
|
|
|
if (ret != 0) {
|
|
|
|
DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
|
|
|
|
ret);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
|
|
|
|
struct drm_file *file_priv)
|
|
|
|
{
|
|
|
|
drm_i915_private_t *dev_priv = dev->dev_private;
|
|
|
|
|
|
|
|
mutex_lock(&dev->struct_mutex);
|
|
|
|
/* Hack! Don't let anybody do execbuf while we don't control the chip.
|
|
|
|
* We need to replace this with a semaphore, or something.
|
|
|
|
*/
|
|
|
|
dev_priv->mm.suspended = 1;
|
|
|
|
|
|
|
|
/* Move all buffers out of the GTT. */
|
|
|
|
i915_gem_evict_from_list(dev, &dev_priv->mm.active_list);
|
|
|
|
i915_gem_evict_from_list(dev, &dev_priv->mm.flushing_list);
|
|
|
|
i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
|
|
|
|
|
|
|
|
/* Make sure the hardware's idle. */
|
|
|
|
while (!list_empty(&dev_priv->mm.request_list)) {
|
|
|
|
struct drm_i915_gem_request *request;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
request = list_first_entry(&dev_priv->mm.request_list,
|
|
|
|
struct drm_i915_gem_request,
|
|
|
|
list);
|
|
|
|
|
|
|
|
ret = i915_wait_request(dev, request->seqno);
|
|
|
|
if (ret != 0) {
|
|
|
|
DRM_ERROR("Error waiting for idle at LeaveVT: %d\n",
|
|
|
|
ret);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
BUG_ON(!list_empty(&dev_priv->mm.active_list));
|
|
|
|
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
|
|
|
|
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
|
|
|
|
|
|
|
|
i915_gem_cleanup_ringbuffer(dev);
|
|
|
|
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|