Merge branch 'master' of git+ssh://git.freedesktop.org/git/mesa/drm into modesetting-101

Conflicts:
	shared-core/i915_dma.c

commit 1e66322633
@@ -248,6 +248,7 @@ static void i915_restore_vga(struct drm_device *dev)
 			   dev_priv->saveGR[0x18]);
 
 	/* Attribute controller registers */
+	inb(st01); /* switch back to index mode */
 	for (i = 0; i < 20; i++)
 		i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
 	inb(st01); /* switch back to index mode */
@@ -769,7 +769,10 @@ struct i915_relocatee_info {
 
 struct drm_i915_validate_buffer {
 	struct drm_buffer_object *buffer;
+	struct drm_bo_info_rep rep;
 	int presumed_offset_correct;
+	void __user *data;
+	int ret;
 };
 
 static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer *buffers,
@@ -790,6 +793,10 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
 	int ret, i;
 	int buf_index = -1;
 
+	/*
+	 * FIXME: O(relocs * buffers) complexity.
+	 */
+
 	for (i = 0; i <= num_buffers; i++)
 		if (buffers[i].buffer)
 			if (reloc[2] == buffers[i].buffer->base.hash.key)
@@ -811,21 +818,14 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
 	if (!relocatee->data_page ||
 	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
 		drm_bo_kunmap(&relocatee->kmap);
+		relocatee->data_page = NULL;
 		relocatee->offset = new_cmd_offset;
-		mutex_lock (&relocatee->buf->mutex);
-		ret = drm_bo_wait (relocatee->buf, 0, 0, FALSE);
-		mutex_unlock (&relocatee->buf->mutex);
-		if (ret) {
-			DRM_ERROR("Could not wait for buffer to apply relocs\n %08lx", new_cmd_offset);
-			return ret;
-		}
 		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
 				  1, &relocatee->kmap);
 		if (ret) {
 			DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset);
 			return ret;
 		}
-
 		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
 						       &relocatee->is_iomem);
 		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
@@ -844,7 +844,11 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
 			  reloc[0], reloc[1], buf_index, relocatee->data_page[index], val);
 		}
 	}
-	relocatee->data_page[index] = val;
+
+	if (relocatee->is_iomem)
+		iowrite32(val, relocatee->data_page + index);
+	else
+		relocatee->data_page[index] = val;
 	return 0;
 }
 
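The hunk above makes the relocation write honour the kind of mapping returned by drm_bmo_virtual(): when the command-buffer page was mapped from I/O memory, the value has to go through iowrite32() rather than a plain pointer store. A minimal sketch of that pattern, illustrative only and not the driver's code (the helper name here is made up):

	#include <linux/io.h>
	#include <linux/types.h>

	/* Write one 32-bit word through a mapping that may be ordinary kernel
	 * memory or an ioremap()ed region, mirroring the is_iomem test above. */
	static void write_mapped_word(u32 *map, int is_iomem,
				      unsigned long index, u32 val)
	{
		if (is_iomem)
			/* I/O memory must go through the io accessors */
			iowrite32(val, (void __iomem *)(map + index));
		else
			map[index] = val;
	}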
@@ -912,11 +916,13 @@ int i915_process_relocs(struct drm_file *file_priv,
 	}
 
 out:
 
 	if (reloc_buf)
 		kfree(reloc_buf);
-	drm_bo_kunmap(&relocatee->kmap);
-	relocatee->data_page = NULL;
+	if (relocatee->data_page) {
+		drm_bo_kunmap(&relocatee->kmap);
+		relocatee->data_page = NULL;
+	}
 
 	return ret;
 }
@@ -956,22 +962,60 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
 		goto out_err;
 	}
 
+	mutex_lock (&relocatee.buf->mutex);
+	ret = drm_bo_wait (relocatee.buf, 0, 0, FALSE);
+	if (ret)
+		goto out_err1;
+
 	while (reloc_user_ptr) {
 		ret = i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, &relocatee, buffers, buf_count);
 		if (ret) {
 			DRM_ERROR("process relocs failed\n");
-			break;
+			goto out_err1;
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
-	drm_bo_usage_deref_locked(&relocatee.buf);
-	mutex_unlock(&dev->struct_mutex);
+out_err1:
+	mutex_unlock (&relocatee.buf->mutex);
+	drm_bo_usage_deref_unlocked(&relocatee.buf);
 
 out_err:
 	return ret;
 }
 
+static int i915_check_presumed(struct drm_i915_op_arg *arg,
+			       struct drm_buffer_object *bo,
+			       uint32_t __user *data,
+			       int *presumed_ok)
+{
+	struct drm_bo_op_req *req = &arg->d.req;
+	uint32_t hint_offset;
+	uint32_t hint = req->bo_req.hint;
+
+	*presumed_ok = 0;
+
+	if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
+		return 0;
+	if (bo->offset == req->bo_req.presumed_offset) {
+		*presumed_ok = 1;
+		return 0;
+	}
+
+	/*
+	 * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
+	 * the user-space IOCTL argument list, since the buffer has moved,
+	 * we're about to apply relocations and we might subsequently
+	 * hit an -EAGAIN. In that case the argument list will be reused by
+	 * user-space, but the presumed offset is no longer valid.
+	 *
+	 * Needless to say, this is a bit ugly.
+	 */
+
+	hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
+	hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
+	return __put_user(hint, data + hint_offset);
+}
+
 /*
  * Validate, add fence and relocate a block of bos from a userspace list
  */
@@ -982,13 +1026,11 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
 {
 	struct drm_i915_op_arg arg;
 	struct drm_bo_op_req *req = &arg.d.req;
-	struct drm_bo_arg_rep rep;
-	unsigned long next = 0;
 	int ret = 0;
 	unsigned buf_count = 0;
-	struct drm_device *dev = file_priv->minor->dev;
 	uint32_t buf_handle;
 	uint32_t __user *reloc_user_ptr;
+	struct drm_i915_validate_buffer *item = buffers;
 
 	do {
 		if (buf_count >= *num_buffers) {
@@ -996,31 +1038,26 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
 			ret = -EINVAL;
 			goto out_err;
 		}
+		item = buffers + buf_count;
+		item->buffer = NULL;
+		item->presumed_offset_correct = 0;
+
 		buffers[buf_count].buffer = NULL;
-		buffers[buf_count].presumed_offset_correct = 0;
 
 		if (copy_from_user(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
 			ret = -EFAULT;
 			goto out_err;
 		}
 
-		if (arg.handled) {
-			data = arg.next;
-			mutex_lock(&dev->struct_mutex);
-			buffers[buf_count].buffer = drm_lookup_buffer_object(file_priv, req->arg_handle, 1);
-			mutex_unlock(&dev->struct_mutex);
-			buf_count++;
-			continue;
-		}
-
-		rep.ret = 0;
+		ret = 0;
 		if (req->op != drm_bo_validate) {
 			DRM_ERROR
 			    ("Buffer object operation wasn't \"validate\".\n");
-			rep.ret = -EINVAL;
+			ret = -EINVAL;
 			goto out_err;
 		}
+		item->ret = 0;
+		item->data = (void __user *) (unsigned long) data;
 
 		buf_handle = req->bo_req.handle;
 		reloc_user_ptr = (uint32_t *)(unsigned long)arg.reloc_ptr;
@@ -1032,48 +1069,146 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
 			DRM_MEMORYBARRIER();
 		}
 
-		rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
-						 req->bo_req.flags, req->bo_req.mask,
-						 req->bo_req.hint,
-						 req->bo_req.fence_class, 0,
-						 &rep.bo_info,
-						 &buffers[buf_count].buffer);
+		ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
+					     req->bo_req.flags, req->bo_req.mask,
+					     req->bo_req.hint,
+					     req->bo_req.fence_class, 0,
+					     &item->rep,
+					     &item->buffer);
 
-		if (rep.ret) {
-			DRM_ERROR("error on handle validate %d\n", rep.ret);
+		if (ret) {
+			DRM_ERROR("error on handle validate %d\n", ret);
 			goto out_err;
 		}
-		/*
-		 * If the user provided a presumed offset hint, check whether
-		 * the buffer is in the same place, if so, relocations relative to
-		 * this buffer need not be performed
-		 */
-		if ((req->bo_req.hint & DRM_BO_HINT_PRESUMED_OFFSET) &&
-		    buffers[buf_count].buffer->offset == req->bo_req.presumed_offset) {
-			buffers[buf_count].presumed_offset_correct = 1;
-		}
-
-		next = arg.next;
-		arg.handled = 1;
-		arg.d.rep = rep;
-
-		if (copy_to_user((void __user *)(unsigned long)data, &arg, sizeof(arg)))
-			return -EFAULT;
-
-		data = next;
+
 		buf_count++;
 
-	} while (next != 0);
-	*num_buffers = buf_count;
-	return 0;
+		ret = i915_check_presumed(&arg, item->buffer,
+					  (uint32_t __user *)
+					  (unsigned long) data,
+					  &item->presumed_offset_correct);
+		if (ret)
+			goto out_err;
+
+		data = arg.next;
+	} while (data != 0);
 
 out_err:
-	mutex_lock(&dev->struct_mutex);
-	i915_dereference_buffers_locked(buffers, buf_count);
-	mutex_unlock(&dev->struct_mutex);
-	*num_buffers = 0;
-	return (ret) ? ret : rep.ret;
+	*num_buffers = buf_count;
+	item->ret = (ret != -EAGAIN) ? ret : 0;
+	return ret;
 }
 
+
+/*
+ * Remove all buffers from the unfenced list.
+ * If the execbuffer operation was aborted, for example due to a signal,
+ * this also make sure that buffers retain their original state and
+ * fence pointers.
+ * Copy back buffer information to user-space unless we were interrupted
+ * by a signal. In which case the IOCTL must be rerun.
+ */
+
+static int i915_handle_copyback(struct drm_device *dev,
+				struct drm_i915_validate_buffer *buffers,
+				unsigned int num_buffers, int ret)
+{
+	int err = ret;
+	int i;
+	struct drm_i915_op_arg arg;
+
+	if (ret)
+		drm_putback_buffer_objects(dev);
+
+	if (ret != -EAGAIN) {
+		for (i = 0; i < num_buffers; ++i) {
+			arg.handled = 1;
+			arg.d.rep.ret = buffers->ret;
+			arg.d.rep.bo_info = buffers->rep;
+			if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
+				err = -EFAULT;
+			buffers++;
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Create a fence object, and if that fails, pretend that everything is
+ * OK and just idle the GPU.
+ */
+
+void i915_fence_or_sync(struct drm_file *file_priv,
+			uint32_t fence_flags,
+			struct drm_fence_arg *fence_arg,
+			struct drm_fence_object **fence_p)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	int ret;
+	struct drm_fence_object *fence;
+
+	ret = drm_fence_buffer_objects(dev, NULL, fence_flags,
+				       NULL, &fence);
+
+	if (ret) {
+
+		/*
+		 * Fence creation failed.
+		 * Fall back to synchronous operation and idle the engine.
+		 */
+
+		(void) i915_emit_mi_flush(dev, MI_READ_FLUSH);
+		(void) i915_quiescent(dev);
+
+		if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+
+			/*
+			 * Communicate to user-space that
+			 * fence creation has failed and that
+			 * the engine is idle.
+			 */
+
+			fence_arg->handle = ~0;
+			fence_arg->error = ret;
+		}
+
+		drm_putback_buffer_objects(dev);
+		if (fence_p)
+			*fence_p = NULL;
+		return;
+	}
+
+	if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+
+		ret = drm_fence_add_user_object(file_priv, fence,
+						fence_flags &
+						DRM_FENCE_FLAG_SHAREABLE);
+		if (!ret)
+			drm_fence_fill_arg(fence, fence_arg);
+		else {
+			/*
+			 * Fence user object creation failed.
+			 * We must idle the engine here as well, as user-
+			 * space expects a fence object to wait on. Since we
+			 * have a fence object we wait for it to signal
+			 * to indicate engine "sufficiently" idle.
+			 */
+
+			(void) drm_fence_object_wait(fence, 0, 1,
+						     fence->type);
+			drm_fence_usage_deref_unlocked(&fence);
+			fence_arg->handle = ~0;
+			fence_arg->error = ret;
+		}
+	}
+
+	if (fence_p)
+		*fence_p = fence;
+	else if (fence)
+		drm_fence_usage_deref_unlocked(&fence);
+}
+
 static int i915_execbuffer(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
@@ -1087,7 +1222,6 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
 	int num_buffers;
 	int ret;
 	struct drm_i915_validate_buffer *buffers;
-	struct drm_fence_object *fence;
 
 	if (!dev_priv->allow_batchbuffer) {
 		DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -1132,7 +1266,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
 	ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
 					buffers, &num_buffers);
 	if (ret)
-		goto out_free;
+		goto out_err0;
 
 	/* make sure all previous memory operations have passed */
 	DRM_MEMORYBARRIER();
@@ -1151,30 +1285,16 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
-	/* fence */
-	ret = drm_fence_buffer_objects(dev, NULL, fence_arg->flags,
-				       NULL, &fence);
-	if (ret)
-		goto out_err0;
-
-	if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) {
-		ret = drm_fence_add_user_object(file_priv, fence, fence_arg->flags & DRM_FENCE_FLAG_SHAREABLE);
-		if (!ret) {
-			fence_arg->handle = fence->base.hash.key;
-			fence_arg->fence_class = fence->fence_class;
-			fence_arg->type = fence->type;
-			fence_arg->signaled = fence->signaled_types;
-		}
-	}
-	drm_fence_usage_deref_unlocked(&fence);
+	i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
+
 out_err0:
 
 	/* handle errors */
+	ret = i915_handle_copyback(dev, buffers, num_buffers, ret);
 	mutex_lock(&dev->struct_mutex);
 	i915_dereference_buffers_locked(buffers, num_buffers);
 	mutex_unlock(&dev->struct_mutex);
 
-out_free:
 	drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER);
 
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
@ -29,11 +29,6 @@
|
||||||
#include "nouveau_drv.h"
|
#include "nouveau_drv.h"
|
||||||
#include "nouveau_dma.h"
|
#include "nouveau_dma.h"
|
||||||
|
|
||||||
/* FIXME : should go into a nouveau_drm.h define ?
|
|
||||||
* (it's shared between DRI & DDX & DRM)
|
|
||||||
*/
|
|
||||||
#define SKIPS 8
|
|
||||||
|
|
||||||
int
|
int
|
||||||
nouveau_dma_channel_init(struct drm_device *dev)
|
nouveau_dma_channel_init(struct drm_device *dev)
|
||||||
{
|
{
|
||||||
|
@@ -76,10 +71,10 @@ nouveau_dma_channel_init(struct drm_device *dev)
 	dchan->cur = dchan->put;
 	dchan->free = dchan->max - dchan->cur;
 
-	/* Insert NOPS for SKIPS */
-	dchan->free -= SKIPS;
-	dchan->push_free = SKIPS;
-	for (i=0; i<SKIPS; i++)
+	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
+	dchan->free -= NOUVEAU_DMA_SKIPS;
+	dchan->push_free = NOUVEAU_DMA_SKIPS;
+	for (i=0; i < NOUVEAU_DMA_SKIPS; i++)
 		OUT_RING(0);
 
 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */
@@ -131,8 +126,6 @@ nouveau_dma_channel_takedown(struct drm_device *dev)
 	}
 }
 
-#define RING_SKIPS 8
-
 #define READ_GET() ((NV_READ(dchan->chan->get) - \
 		     dchan->chan->pushbuf_base) >> 2)
 #define WRITE_PUT(val) do { \
@@ -156,19 +149,19 @@ nouveau_dma_wait(struct drm_device *dev, int size)
 	if (dchan->free < size) {
 		dchan->push_free = 1;
 		OUT_RING(0x20000000|dchan->chan->pushbuf_base);
-		if (get <= RING_SKIPS) {
+		if (get <= NOUVEAU_DMA_SKIPS) {
 			/*corner case - will be idle*/
-			if (dchan->put <= RING_SKIPS)
-				WRITE_PUT(RING_SKIPS + 1);
+			if (dchan->put <= NOUVEAU_DMA_SKIPS)
+				WRITE_PUT(NOUVEAU_DMA_SKIPS + 1);
 
 			do {
 				get = READ_GET();
-			} while (get <= RING_SKIPS);
+			} while (get <= NOUVEAU_DMA_SKIPS);
 		}
 
-		WRITE_PUT(RING_SKIPS);
-		dchan->cur = dchan->put = RING_SKIPS;
-		dchan->free = get - (RING_SKIPS + 1);
+		WRITE_PUT(NOUVEAU_DMA_SKIPS);
+		dchan->cur = dchan->put = NOUVEAU_DMA_SKIPS;
+		dchan->free = get - (NOUVEAU_DMA_SKIPS + 1);
 	} else {
 		dchan->free = get - dchan->cur - 1;
@@ -70,6 +70,12 @@ struct drm_nouveau_gpuobj_free {
 	uint32_t handle;
 };
 
+/* This is needed to avoid a race condition.
+ * Otherwise you may be writing in the fetch area.
+ * Is this large enough, as it's only 32 bytes, and the maximum fetch size is 256 bytes?
+ */
+#define NOUVEAU_DMA_SKIPS 8
+
 #define NOUVEAU_MEM_FB			0x00000001
 #define NOUVEAU_MEM_AGP			0x00000002
 #define NOUVEAU_MEM_FB_ACCEPTABLE	0x00000004
@@ -45,11 +45,6 @@ int nouveau_fifo_ctx_size(struct drm_device *dev)
  * functions doing the actual work
  ***********************************/
 
-/* voir nv_xaa.c : NVResetGraphics
- * mémoire mappée par nv_driver.c : NVMapMem
- * voir nv_driver.c : NVPreInit
- */
-
 static int nouveau_fifo_instmem_configure(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -1662,7 +1662,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
 	u32 height;
 	int i;
 	u32 texpitch, microtile;
-	u32 offset;
+	u32 offset, byte_offset;
 	RING_LOCALS;
 
 	if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
@@ -1727,6 +1727,13 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
 	} else
 		microtile = 0;
 
+	/* this might fail for zero-sized uploads - are those illegal? */
+	if (!radeon_check_offset(dev_priv, tex->offset + image->height *
+				 blit_width - 1)) {
+		DRM_ERROR("Invalid final destination offset\n");
+		return -EINVAL;
+	}
+
 	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
 
 	do {
@@ -1840,6 +1847,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
 		}
 
 #undef RADEON_COPY_MT
+		byte_offset = (image->y & ~2047) * blit_width;
 		buf->file_priv = file_priv;
 		buf->used = size;
 		offset = dev_priv->gart_buffers_offset + buf->offset;
@@ -1854,9 +1862,9 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
 			 RADEON_DP_SRC_SOURCE_MEMORY |
 			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
 		OUT_RING((spitch << 22) | (offset >> 10));
-		OUT_RING((texpitch << 22) | (tex->offset >> 10));
+		OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
 		OUT_RING(0);
-		OUT_RING((image->x << 16) | image->y);
+		OUT_RING((image->x << 16) | (image->y % 2048));
 		OUT_RING((image->width << 16) | height);
 		RADEON_WAIT_UNTIL_2D_IDLE();
 		ADVANCE_RING();
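In the radeon hunks above, the destination Y coordinate written into the blit packet is now taken modulo 2048, apparently because the packet's Y field cannot hold larger values; the rows above that limit are folded into byte_offset, which is added to the destination base in 1 KiB units. The split works because y * pitch == (y & ~2047) * pitch + (y % 2048) * pitch. A small stand-alone check of that identity, illustrative only and not driver code (the example pitch value is arbitrary):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t y = 5000, pitch = 256;              /* example values */
		uint32_t byte_offset = (y & ~2047u) * pitch; /* whole 2048-row blocks */
		uint32_t wrapped_y = y % 2048;               /* fits the packet's Y field */

		/* the split addresses the same byte as the original coordinate */
		assert((uint64_t)y * pitch ==
		       (uint64_t)byte_offset + (uint64_t)wrapped_y * pitch);
		return 0;
	}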