radeon_ms: initial pass at command buffer validation

main
Jerome Glisse 2008-03-31 00:55:05 +02:00 committed by John Doe
parent 2d9eccfd05
commit 09e637848a
10 changed files with 383 additions and 134 deletions

linux-core/amd_cbuffer.h Symbolic link

@ -0,0 +1 @@
../shared-core/amd_cbuffer.h

shared-core/amd_cbuffer.h Normal file

@ -0,0 +1,51 @@
/*
* Copyright 2007 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
#ifndef __AMD_CBUFFER_H__
#define __AMD_CBUFFER_H__
/* struct amd_cbuffer describes a command buffer: it is the structure passed
* around during command validation (i.e. checking that the user has the
* right to execute the given commands).
*/
struct amd_cbuffer_arg
{
struct list_head list;
struct drm_buffer_object *buffer;
int32_t dw_id;
};
struct amd_cbuffer
{
uint32_t *cbuffer;
uint32_t cbuffer_dw_count;
struct amd_cbuffer_arg arg_unused;
struct amd_cbuffer_arg arg_used;
struct amd_cbuffer_arg *args;
};
#endif
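
The structures above drive the validation pass added by this commit: each
amd_cbuffer_arg ties a buffer object to the dword index (dw_id) of the
relocation that references it, and validation moves entries from the
arg_unused list to the arg_used list as packets are checked. A minimal
kernel-style sketch of that lookup-and-claim step, using the same list
helpers the driver already relies on (the function name
amd_cbuffer_claim_arg is illustrative, not part of the commit):

/* Illustrative helper (not in this commit): claim the relocation argument
 * recorded at dword index dw_id, moving it from the unparsed list to the
 * used list, the way the packet checkers later in this commit do. */
static int amd_cbuffer_claim_arg(struct amd_cbuffer *cbuffer, uint32_t dw_id)
{
	struct amd_cbuffer_arg *arg;

	list_for_each_entry(arg, &cbuffer->arg_unused.list, list) {
		if (arg->dw_id == dw_id) {
			list_del(&arg->list);
			list_add_tail(&arg->list, &cbuffer->arg_used.list);
			return 0;
		}
	}
	/* no buffer object was associated with this dword */
	return -EINVAL;
}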

View File

@ -35,6 +35,7 @@
#include "radeon_ms_drm.h" #include "radeon_ms_drm.h"
#include "radeon_ms_rom.h" #include "radeon_ms_rom.h"
#include "radeon_ms_properties.h" #include "radeon_ms_properties.h"
#include "amd_cbuffer.h"
#define DRIVER_AUTHOR "Jerome Glisse, Dave Airlie, Gareth Hughes, "\
"Keith Whitwell, others."
@ -483,21 +484,6 @@ void radeon_ms_state_save(struct drm_device *dev, struct radeon_state *state);
void radeon_ms_state_restore(struct drm_device *dev,
struct radeon_state *state);
/* packect stuff **************************************************************/
#define RADEON_CP_PACKET0 0x00000000
#define CP_PACKET0(reg, n) \
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET3_CNTL_BITBLT_MULTI 0xC0009B00
# define GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
# define GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
# define GMC_BRUSH_NONE (15 << 4)
# define GMC_SRC_DATATYPE_COLOR (3 << 12)
# define ROP3_S 0x00cc0000
# define DP_SRC_SOURCE_MEMORY (2 << 24)
# define GMC_CLR_CMP_CNTL_DIS (1 << 28)
# define GMC_WR_MSK_DIS (1 << 30)
/* helper macro & functions ***************************************************/
#define REG_S(rn, bn, v) (((v) << rn##__##bn##__SHIFT) & rn##__##bn##__MASK)
#define REG_G(rn, bn, v) (((v) & rn##__##bn##__MASK) >> rn##__##bn##__SHIFT)

View File

@ -33,6 +33,15 @@
#include "radeon_ms.h" #include "radeon_ms.h"
#define GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
#define GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
#define GMC_BRUSH_NONE (15 << 4)
#define GMC_SRC_DATATYPE_COLOR (3 << 12)
#define ROP3_S 0x00cc0000
#define DP_SRC_SOURCE_MEMORY (2 << 24)
#define GMC_CLR_CMP_CNTL_DIS (1 << 28)
#define GMC_WR_MSK_DIS (1 << 30)
void radeon_ms_bo_copy_blit(struct drm_device *dev,
uint32_t src_offset,
uint32_t dst_offset,
@ -60,7 +69,7 @@ void radeon_ms_bo_copy_blit(struct drm_device *dev,
if (c >= 8192) {
c = 8191;
}
cmd[0] = CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16); cmd[0] = CP_PACKET3(PACKET3_OPCODE_BITBLT, 5);
cmd[1] = GMC_SRC_PITCH_OFFSET_CNTL |
GMC_DST_PITCH_OFFSET_CNTL |
GMC_BRUSH_NONE |

View File

@ -218,6 +218,7 @@ int radeon_ms_agp_finish(struct drm_device *dev)
return 0;
}
dev_priv->bus_ready = 0;
DRM_INFO("[radeon_ms] release agp\n");
drm_agp_release(dev);
return 0;
}
@ -237,7 +238,7 @@ int radeon_ms_agp_init(struct drm_device *dev)
}
ret = drm_agp_acquire(dev);
if (ret) {
DRM_ERROR("[radeon_ms] error failed to acquire agp\n"); DRM_ERROR("[radeon_ms] error failed to acquire agp %d\n", ret);
return ret;
}
agp_status = MMIO_R(AGP_STATUS);

View File

@ -156,7 +156,7 @@ int radeon_ms_cp_init(struct drm_device *dev)
dev_priv->ring_buffer_object->mem.num_pages,
&dev_priv->ring_buffer_map);
if (ret) {
DRM_INFO("[radeon_ms] error mapping ring buffer: %d\n", ret); DRM_ERROR("[radeon_ms] error mapping ring buffer: %d\n", ret);
return ret;
}
dev_priv->ring_buffer = dev_priv->ring_buffer_map.virtual;
@ -275,32 +275,15 @@ void radeon_ms_cp_save(struct drm_device *dev, struct radeon_state *state)
void radeon_ms_cp_stop(struct drm_device *dev)
{
struct drm_radeon_private *dev_priv = dev->dev_private;
uint32_t rbbm_status, rbbm_status_cp_mask;
dev_priv->cp_ready = 0; MMIO_W(CP_CSQ_CNTL, REG_S(CP_CSQ_CNTL, CSQ_MODE,
MMIO_W(CP_CSQ_CNTL, 0); CSQ_MODE__CSQ_PRIDIS_INDDIS));
MMIO_R(CP_CSQ_CNTL);
MMIO_W(CP_CSQ_MODE, 0);
MMIO_R(CP_CSQ_MODE);
MMIO_W(RBBM_SOFT_RESET, RBBM_SOFT_RESET__SOFT_RESET_CP);
MMIO_R(RBBM_SOFT_RESET);
MMIO_W(RBBM_SOFT_RESET, 0);
MMIO_R(RBBM_SOFT_RESET);
rbbm_status = MMIO_R(RBBM_STATUS);
rbbm_status_cp_mask = (RBBM_STATUS__CPRQ_ON_RBB |
RBBM_STATUS__CPRQ_IN_RTBUF |
RBBM_STATUS__CP_CMDSTRM_BUSY);
if (rbbm_status & rbbm_status_cp_mask) {
DRM_INFO("[radeon_ms] cp busy (RBBM_STATUS: 0x%08X "
"RBBM_STATUS(cp_mask): 0x%08X)\n", rbbm_status,
rbbm_status_cp_mask);
}
MMIO_W(CP_RB_CNTL, CP_RB_CNTL__RB_RPTR_WR_ENA);
MMIO_W(CP_RB_RPTR_WR, 0);
MMIO_W(CP_RB_WPTR, 0);
DRM_UDELAY(5);
dev_priv->ring_wptr = dev_priv->ring_rptr = MMIO_R(CP_RB_RPTR);
MMIO_W(CP_RB_CNTL, 0); MMIO_W(CP_RB_WPTR, dev_priv->ring_wptr);
}
int radeon_ms_cp_wait(struct drm_device *dev, int n) int radeon_ms_cp_wait(struct drm_device *dev, int n)
@ -349,7 +332,7 @@ int radeon_ms_ring_emit(struct drm_device *dev, uint32_t *cmd, uint32_t count)
dev_priv->ring_free -= count;
for (i = 0; i < count; i++) {
dev_priv->ring_buffer[dev_priv->ring_wptr] = cmd[i];
DRM_INFO("ring[%d] = 0x%08X\n", dev_priv->ring_wptr, cmd[i]); DRM_INFO("ring[%d]=0x%08X\n", dev_priv->ring_wptr, cmd[i]);
dev_priv->ring_wptr++;
dev_priv->ring_wptr &= dev_priv->ring_mask;
}

View File

@ -25,14 +25,15 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
#include "radeon_ms.h"
#include "amd_cbuffer.h"
static void radeon_ms_execbuffer_args_clean(struct drm_device *dev,
struct drm_buffer_object **buffers, struct amd_cbuffer *cbuffer,
uint32_t args_count)
{
mutex_lock(&dev->struct_mutex);
while (args_count--) {
drm_bo_usage_deref_locked(&buffers[args_count]); drm_bo_usage_deref_locked(&cbuffer->args[args_count].buffer);
}
mutex_unlock(&dev->struct_mutex);
}
@ -40,8 +41,7 @@ static void radeon_ms_execbuffer_args_clean(struct drm_device *dev,
static int radeon_ms_execbuffer_args(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_radeon_execbuffer *execbuffer,
struct drm_buffer_object **buffers, struct amd_cbuffer *cbuffer)
uint32_t *relocs)
{
struct drm_radeon_execbuffer_arg arg;
struct drm_bo_arg_rep rep;
@ -57,17 +57,18 @@ static int radeon_ms_execbuffer_args(struct drm_device *dev,
ret = -EINVAL;
goto out_err;
}
buffers[args_count] = NULL; INIT_LIST_HEAD(&cbuffer->args[args_count].list);
cbuffer->args[args_count].buffer = NULL;
if (copy_from_user(&arg, (void __user *)((unsigned)data),
sizeof(struct drm_radeon_execbuffer_arg))) {
ret = -EFAULT;
goto out_err;
}
mutex_lock(&dev->struct_mutex);
buffers[args_count] = cbuffer->args[args_count].buffer =
drm_lookup_buffer_object(file_priv,
arg.d.req.arg_handle, 1);
relocs[args_count] = arg.reloc_offset; cbuffer->args[args_count].dw_id = arg.reloc_offset;
mutex_unlock(&dev->struct_mutex);
if (arg.d.req.op != drm_bo_validate) {
DRM_ERROR("[radeon_ms] buffer object operation wasn't "
@ -76,7 +77,6 @@ static int radeon_ms_execbuffer_args(struct drm_device *dev,
goto out_err;
}
memset(&rep, 0, sizeof(struct drm_bo_arg_rep));
if (args_count >= 1) {
ret = drm_bo_handle_validate(file_priv,
arg.d.req.bo_req.handle,
arg.d.req.bo_req.flags,
@ -85,8 +85,7 @@ static int radeon_ms_execbuffer_args(struct drm_device *dev,
arg.d.req.bo_req.fence_class,
0,
&rep.bo_info,
&buffers[args_count]); &cbuffer->args[args_count].buffer);
}
if (ret) {
DRM_ERROR("[radeon_ms] error on handle validate %d\n",
ret);
@ -101,6 +100,10 @@ static int radeon_ms_execbuffer_args(struct drm_device *dev,
goto out_err;
}
data = next;
list_add_tail(&cbuffer->args[args_count].list,
&cbuffer->arg_unused.list);
args_count++;
} while (next != 0);
if (args_count != execbuffer->args_count) {
@ -111,34 +114,201 @@ static int radeon_ms_execbuffer_args(struct drm_device *dev,
}
return 0;
out_err:
radeon_ms_execbuffer_args_clean(dev, buffers, args_count); radeon_ms_execbuffer_args_clean(dev, cbuffer, args_count);
return ret;
}
static int radeon_ms_execbuffer_check(struct drm_device *dev, enum {
struct drm_file *file_priv, REGISTER_FORBIDDEN = 0,
struct drm_radeon_execbuffer *execbuffer, REGISTER_SAFE,
struct drm_buffer_object **buffers, REGISTER_SET_OFFSET,
uint32_t *relocs, };
uint32_t *cmd) static uint8_t _r3xx_register_right[0x5000 >> 2];
static int amd_cbuffer_packet0_set_offset(struct drm_device *dev,
struct amd_cbuffer *cbuffer,
uint32_t reg, int dw_id,
struct amd_cbuffer_arg *arg)
{
uint32_t i, gpu_addr; uint32_t gpu_addr;
int ret;
for (i = 0; i < execbuffer->args_count; i++) { ret = radeon_ms_bo_get_gpu_addr(dev, &arg->buffer->mem, &gpu_addr);
if (relocs[i]) { if (ret) {
ret = radeon_ms_bo_get_gpu_addr(dev, &buffers[i]->mem, return ret;
}
switch (reg) {
default:
return -EINVAL;
}
return 0;
}
static struct amd_cbuffer_arg *
amd_cbuffer_arg_from_dw_id(struct amd_cbuffer_arg *head, uint32_t dw_id)
{
struct amd_cbuffer_arg *arg;
list_for_each_entry(arg, &head->list, list) {
if (arg->dw_id == dw_id) {
return arg;
}
}
/* no buffer at this dw index */
return NULL;
}
static int amd_cbuffer_packet0_check(struct drm_device *dev,
struct drm_file *file_priv,
struct amd_cbuffer *cbuffer,
int dw_id,
uint8_t *register_right)
{
struct amd_cbuffer_arg *arg;
uint32_t reg, count, r, i;
int ret;
reg = cbuffer->cbuffer[dw_id] & PACKET0_REG_MASK;
count = (cbuffer->cbuffer[dw_id] & PACKET0_COUNT_MASK) >>
PACKET0_COUNT_SHIFT;
for (r = reg, i = 0; i <= count; i++, r++) {
switch (register_right[i]) {
case REGISTER_FORBIDDEN:
return -EINVAL;
case REGISTER_SAFE:
break;
case REGISTER_SET_OFFSET:
arg = amd_cbuffer_arg_from_dw_id(&cbuffer->arg_unused,
dw_id + i +1);
if (arg == NULL) {
return -EINVAL;
}
/* remove from unparsed list */
list_del(&arg->list);
list_add_tail(&arg->list, &cbuffer->arg_used.list);
/* set the offset */
ret = amd_cbuffer_packet0_set_offset(dev, cbuffer,
r, dw_id + i + 1,
arg);
if (ret) {
return ret;
}
break;
}
}
/* header + N + 1 dword passed test */
return count + 2;
}
static int amd_cbuffer_packet3_check(struct drm_device *dev,
struct drm_file *file_priv,
struct amd_cbuffer *cbuffer,
int dw_id)
{
struct amd_cbuffer_arg *arg;
uint32_t opcode, count;
uint32_t s_auth, s_mask;
uint32_t gpu_addr;
int ret;
opcode = (cbuffer->cbuffer[dw_id] & PACKET3_OPCODE_MASK) >>
PACKET3_OPCODE_SHIFT;
count = (cbuffer->cbuffer[dw_id] & PACKET3_COUNT_MASK) >>
PACKET3_COUNT_SHIFT;
switch (opcode) {
case PACKET3_OPCODE_NOP:
break;
case PACKET3_OPCODE_BITBLT:
case PACKET3_OPCODE_BITBLT_MULTI:
DRM_INFO("[radeon_ms] exec step - [05][P3]00.00\n");
/* we only allow simple blits */
if (count != 5) {
return -EINVAL;
}
DRM_INFO("[radeon_ms] exec step - [05][P3]01.00\n");
s_mask = 0xf;
s_auth = 0x3;
if ((cbuffer->cbuffer[dw_id + 1] & s_mask) != s_auth) {
return -EINVAL;
}
DRM_INFO("[radeon_ms] exec step - [05][P3]02.00\n");
arg = amd_cbuffer_arg_from_dw_id(&cbuffer->arg_unused, dw_id+2);
if (arg == NULL) {
return -EINVAL;
}
DRM_INFO("[radeon_ms] exec step - [05][P3]03.00\n");
ret = radeon_ms_bo_get_gpu_addr(dev, &arg->buffer->mem,
&gpu_addr);
if (ret) {
return ret;
}
cmd[relocs[i]] |= (gpu_addr) >> 10; DRM_INFO("[radeon_ms] exec step - [05][P3]04.00\n");
gpu_addr = (gpu_addr >> 10) & 0x003FFFFF;
cbuffer->cbuffer[dw_id + 2] &= 0xFFC00000;
cbuffer->cbuffer[dw_id + 2] |= gpu_addr;
arg = amd_cbuffer_arg_from_dw_id(&cbuffer->arg_unused, dw_id+3);
if (arg == NULL) {
return -EINVAL;
}
DRM_INFO("[radeon_ms] exec step - [05][P3]05.00\n");
ret = radeon_ms_bo_get_gpu_addr(dev, &arg->buffer->mem,
&gpu_addr);
if (ret) {
return ret;
}
DRM_INFO("[radeon_ms] exec step - [05][P3]06.00\n");
gpu_addr = (gpu_addr >> 10) & 0x003FFFFF;
cbuffer->cbuffer[dw_id + 3] &= 0xFFC00000;
cbuffer->cbuffer[dw_id + 3] |= gpu_addr;
DRM_INFO("[radeon_ms] exec step - [05][P3]07.00\n");
/* FIXME: check that source & destination are big enough
* for requested blit */
break;
default:
return -EINVAL;
}
/* header + N + 1 dword passed test */
return count + 2;
}
static int amd_cbuffer_check(struct drm_device *dev,
struct drm_file *file_priv,
struct amd_cbuffer *cbuffer)
{
uint32_t i;
int ret;
for (i = 0; i < cbuffer->cbuffer_dw_count;) {
DRM_INFO("[radeon_ms] exec step - [05]00.00 %d 0x%08X\n",
i, cbuffer->cbuffer[i]);
switch (PACKET_HEADER_GET(cbuffer->cbuffer[i])) {
case 0:
ret = amd_cbuffer_packet0_check(dev, file_priv,
cbuffer, i,
_r3xx_register_right);
if (ret) {
return ret;
}
/* advance to next packet */
i += ret;
break;
case 1:
/* we don't accept packet 1 */
return -EINVAL;
case 2:
/* packet 2 */
i += 1;
break;
case 3:
ret = amd_cbuffer_packet3_check(dev, file_priv,
cbuffer, i);
if (ret) {
return ret;
}
/* advance to next packet */
i += ret;
break;
}
for (i = 0; i < execbuffer->cmd_size; i++) {
#if 0
DRM_INFO("cmd[%d]=0x%08X\n", i, cmd[i]);
#endif
}
return 0;
}
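
For reference, the sizing rules that amd_cbuffer_check() applies can be
exercised in isolation: bits 31:30 of each dword select the packet type,
type 1 is rejected, type 2 is a single filler dword, and types 0 and 3 span
count + 2 dwords (header plus count + 1 payload dwords). A self-contained
sketch with illustrative test data, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone walker using the same packet sizing rules as
 * amd_cbuffer_check() above. Returns 0 if the stream parses. */
static int walk_packets(const uint32_t *buf, uint32_t dw_count)
{
	uint32_t i = 0;

	while (i < dw_count) {
		uint32_t type = buf[i] >> 30;              /* PACKET_HEADER_GET */
		uint32_t count = (buf[i] >> 16) & 0x3FFF;  /* PACKET0/3 count field */

		switch (type) {
		case 0: /* packet0: header + count + 1 register writes */
		case 3: /* packet3: header + count + 1 payload dwords */
			i += count + 2;
			break;
		case 2: /* packet2: single filler dword */
			i += 1;
			break;
		default: /* packet1 is rejected by the validator */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	/* one type-2 filler, then a packet0 writing two registers (count = 1) */
	uint32_t buf[] = { 0x80000000, 0x00010140, 0x0, 0x0 };

	printf("parse result: %d\n", walk_packets(buf, 4));
	return 0;
}
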
@ -148,69 +318,87 @@ int radeon_ms_execbuffer(struct drm_device *dev, void *data,
{
struct drm_radeon_execbuffer *execbuffer = data;
struct drm_fence_arg *fence_arg = &execbuffer->fence_arg;
struct drm_buffer_object **buffers;
struct drm_bo_kmap_obj cmd_kmap;
struct drm_fence_object *fence;
uint32_t *relocs;
uint32_t *cmd;
int cmd_is_iomem;
int ret = 0;
struct amd_cbuffer cbuffer;
/* command buffer dword count must be >= 0 */
if (execbuffer->cmd_size < 0) {
return -EINVAL;
}
/* FIXME: Lock buffer manager, is this really needed ?
*/
DRM_INFO("[radeon_ms] exec step - 00.00\n");
ret = drm_bo_read_lock(&dev->bm.bm_lock);
if (ret) {
return ret;
}
relocs = drm_calloc(execbuffer->args_count, sizeof(uint32_t), DRM_INFO("[radeon_ms] exec step - 01.00\n");
cbuffer.args = drm_calloc(execbuffer->args_count,
sizeof(struct amd_cbuffer_arg),
DRM_MEM_DRIVER);
if (relocs == NULL) { if (cbuffer.args == NULL) {
drm_bo_read_unlock(&dev->bm.bm_lock); ret = -ENOMEM;
return -ENOMEM; goto out_free;
}
buffers = drm_calloc(execbuffer->args_count,
sizeof(struct drm_buffer_object *),
DRM_MEM_DRIVER);
if (buffers == NULL) {
drm_free(relocs, (execbuffer->args_count * sizeof(uint32_t)),
DRM_MEM_DRIVER);
drm_bo_read_unlock(&dev->bm.bm_lock);
return -ENOMEM;
} }
INIT_LIST_HEAD(&cbuffer.arg_unused.list);
INIT_LIST_HEAD(&cbuffer.arg_used.list);
/* process arguments */
ret = radeon_ms_execbuffer_args(dev, file_priv, execbuffer, DRM_INFO("[radeon_ms] exec step - 02.00\n");
buffers, relocs); ret = radeon_ms_execbuffer_args(dev, file_priv, execbuffer, &cbuffer);
if (ret) {
DRM_ERROR("[radeon_ms] execbuffer wrong arguments\n");
goto out_free;
}
/* map command buffer */
DRM_INFO("[radeon_ms] exec step - 03.00\n");
cbuffer.cbuffer_dw_count = (cbuffer.args[0].buffer->mem.num_pages *
PAGE_SIZE) >> 2;
if (execbuffer->cmd_size > cbuffer.cbuffer_dw_count) {
ret = -EINVAL;
goto out_free_release;
}
DRM_INFO("[radeon_ms] exec step - 04.00\n");
cbuffer.cbuffer_dw_count = execbuffer->cmd_size;
memset(&cmd_kmap, 0, sizeof(struct drm_bo_kmap_obj));
ret = drm_bo_kmap(buffers[0], ret = drm_bo_kmap(cbuffer.args[0].buffer, 0,
0, cbuffer.args[0].buffer->mem.num_pages, &cmd_kmap);
buffers[0]->mem.num_pages,
&cmd_kmap);
if (ret) {
DRM_ERROR("[radeon_ms] error mapping ring buffer: %d\n", ret);
goto out_free_release;
}
cmd = drm_bmo_virtual(&cmd_kmap, &cmd_is_iomem); DRM_INFO("[radeon_ms] exec step - 05.00\n");
/* do cmd checking & relocations */ cbuffer.cbuffer = drm_bmo_virtual(&cmd_kmap, &cmd_is_iomem);
ret = radeon_ms_execbuffer_check(dev, file_priv, execbuffer, DRM_INFO("[radeon_ms] exec step - 05.01\n");
buffers, relocs, cmd); list_del(&cbuffer.args[0].list);
if (ret) { DRM_INFO("[radeon_ms] exec step - 05.02\n");
drm_putback_buffer_objects(dev); list_add_tail(&cbuffer.args[0].list , &cbuffer.arg_used.list);
goto out_free_release; DRM_INFO("[radeon_ms] exec step - 05.03\n");
}
ret = radeon_ms_ring_emit(dev, cmd, execbuffer->cmd_size); /* do cmd checking & relocations */
ret = amd_cbuffer_check(dev, file_priv, &cbuffer);
if (ret) {
drm_putback_buffer_objects(dev);
goto out_free_release;
}
DRM_INFO("[radeon_ms] exec step - 06.00\n");
ret = radeon_ms_ring_emit(dev, cbuffer.cbuffer,
cbuffer.cbuffer_dw_count);
if (ret) {
drm_putback_buffer_objects(dev);
goto out_free_release;
}
DRM_INFO("[radeon_ms] exec step - 07.00\n");
/* fence */
if (execbuffer->args_count > 1) {
ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
if (ret) {
drm_putback_buffer_objects(dev);
@ -219,7 +407,8 @@ int radeon_ms_execbuffer(struct drm_device *dev, void *data,
}
if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) {
ret = drm_fence_add_user_object(file_priv, fence,
fence_arg->flags & DRM_FENCE_FLAG_SHAREABLE); fence_arg->flags &
DRM_FENCE_FLAG_SHAREABLE);
if (!ret) {
fence_arg->handle = fence->base.hash.key;
fence_arg->fence_class = fence->fence_class;
@ -229,16 +418,16 @@ int radeon_ms_execbuffer(struct drm_device *dev, void *data,
}
}
drm_fence_usage_deref_unlocked(&fence);
} DRM_INFO("[radeon_ms] exec step - 08.00\n");
out_free_release:
drm_bo_kunmap(&cmd_kmap);
radeon_ms_execbuffer_args_clean(dev, buffers, execbuffer->args_count); radeon_ms_execbuffer_args_clean(dev, &cbuffer, execbuffer->args_count);
DRM_INFO("[radeon_ms] exec step - 09.00\n");
out_free:
drm_free(relocs, (execbuffer->args_count * sizeof(uint32_t)), drm_free(cbuffer.args,
DRM_MEM_DRIVER); (execbuffer->args_count * sizeof(struct amd_cbuffer_arg)),
drm_free(buffers,
(execbuffer->args_count * sizeof(struct drm_buffer_object *)),
DRM_MEM_DRIVER);
drm_bo_read_unlock(&dev->bm.bm_lock);
DRM_INFO("[radeon_ms] exec step - 10.00\n");
return ret;
}
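
The relocation step inside amd_cbuffer_packet3_check() reduces to rewriting
the source and destination pitch/offset dwords (dw_id + 2 and dw_id + 3)
once the buffer objects' GPU addresses are known: the validated offset goes
into the low 22 bits in 1KB units, and the upper bits supplied by userspace
(assumed here to carry the pitch field) are preserved. A small sketch of
that arithmetic, with an illustrative helper name:

#include <stdint.h>

/* Patch a GMC pitch/offset dword in place: keep the caller-supplied upper
 * bits and substitute the validated GPU offset in 1KB units (bits 21:0). */
static void patch_pitch_offset(uint32_t *dw, uint32_t gpu_addr)
{
	*dw &= 0xFFC00000;
	*dw |= (gpu_addr >> 10) & 0x003FFFFF;
}

For a buffer placed at GPU address 0x00200000, for example, the offset field
written back is 0x00200000 >> 10 = 0x800.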

View File

@ -117,6 +117,7 @@ int radeon_ms_family_init(struct drm_device *dev)
switch (dev_priv->bus_type) {
case RADEON_AGP:
dev_priv->create_ttm = drm_agp_init_ttm;
dev_priv->bus_finish = radeon_ms_agp_finish;
dev_priv->bus_init = radeon_ms_agp_init;
dev_priv->bus_restore = radeon_ms_agp_restore;
dev_priv->bus_save = radeon_ms_agp_save;

View File

@ -128,7 +128,7 @@ static void radeon_ms_gpu_reset(struct drm_device *dev)
MMIO_W(RBBM_SOFT_RESET, 0);
MMIO_R(RBBM_SOFT_RESET);
#if 0 #if 1
cache_mode = MMIO_R(RB2D_DSTCACHE_MODE);
MMIO_W(RB2D_DSTCACHE_MODE,
cache_mode | RB2D_DSTCACHE_MODE__DC_DISABLE_IGNORE_PE);
@ -576,7 +576,6 @@ void radeon_ms_gpu_save(struct drm_device *dev, struct radeon_state *state)
int radeon_ms_wait_for_idle(struct drm_device *dev)
{
struct drm_radeon_private *dev_priv = dev->dev_private;
struct radeon_state *state = &dev_priv->driver_state;
int i, j, ret;
for (i = 0; i < 2; i++) {

View File

@ -1755,4 +1755,33 @@
#define VAP_PVS_STATE_FLUSH_REG__DATA_REGISTER__MASK 0xFFFFFFFF
#define VAP_PVS_STATE_FLUSH_REG__DATA_REGISTER__SHIFT 0
/* packet stuff **************************************************************/
#define PACKET_HEADER_MASK 0xC0000000
#define PACKET_HEADER_SHIFT 30
#define PACKET_HEADER_GET(p) (((p) & PACKET_HEADER_MASK) >> PACKET_HEADER_SHIFT)
#define PACKET_HEADER_SET(p) (((p) << PACKET_HEADER_SHIFT) & PACKET_HEADER_MASK)
#define PACKET0_HEADER 0x0
# define PACKET0_REG_MASK 0x00001FFF
# define PACKET0_REG_SHIFT 0
# define PACKET0_COUNT_MASK 0x3FFF0000
# define PACKET0_COUNT_SHIFT 16
#define PACKET1_HEADER 0x1
#define PACKET2_HEADER 0x2
#define PACKET3_HEADER 0x3
# define PACKET3_OPCODE_MASK 0x0000FF00
# define PACKET3_OPCODE_SHIFT 8
# define PACKET3_OPCODE_NOP 0x10
# define PACKET3_OPCODE_BITBLT 0x92
# define PACKET3_OPCODE_BITBLT_MULTI 0x9B
# define PACKET3_COUNT_MASK 0x3FFF0000
# define PACKET3_COUNT_SHIFT 16
#define CP_PACKET0(r, n) (PACKET_HEADER_SET(PACKET0_HEADER) |\
((((r)>>2)<<PACKET0_REG_SHIFT) & PACKET0_REG_MASK) |\
(((n) << PACKET0_COUNT_SHIFT) & PACKET0_COUNT_MASK))
#define CP_PACKET3(o, n) (PACKET_HEADER_SET(PACKET3_HEADER) |\
(((o)<<PACKET3_OPCODE_SHIFT) & PACKET3_OPCODE_MASK) |\
(((n)<<PACKET3_COUNT_SHIFT) & PACKET3_COUNT_MASK))
#endif
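
As a quick sanity check, the new packet helpers can be evaluated by hand
from the masks and shifts above; the register offset 0x1700 used below is an
arbitrary illustration, not a value taken from this commit:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* CP_PACKET3(PACKET3_OPCODE_BITBLT_MULTI, 5): type 3, opcode 0x9B,
	 * count 5 -> 0xC0059B00, the same header the old hardcoded
	 * CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16) expression produced. */
	uint32_t p3 = (0x3u << 30) | ((0x9Bu << 8) & 0x0000FF00) |
	              ((5u << 16) & 0x3FFF0000);
	assert(p3 == 0xC0059B00);

	/* CP_PACKET0(0x1700, 2): type 0, register stored as a dword index
	 * (0x1700 >> 2 = 0x5C0), count 2 -> writes 3 consecutive registers. */
	uint32_t p0 = (0x0u << 30) | ((0x1700u >> 2) & 0x00001FFF) |
	              ((2u << 16) & 0x3FFF0000);
	assert(p0 == 0x000205C0);

	return 0;
}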