radeon command submission start

take code from Jerome, munge it into a TTM IB re-use
main
Dave Airlie 2008-07-28 15:21:13 +10:00
parent 5282a505b6
commit 38835f9cd2
9 changed files with 450 additions and 51 deletions

View File

@ -41,7 +41,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nv50_kms_wrapper.o \
nv50_fbcon.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_gem.o \
radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o \
radeon_buffer.o radeon_fence.o atom.o radeon_display.o radeon_atombios.o radeon_i2c.o radeon_connectors.o radeon_cs.o \
atombios_crtc.o radeon_encoders.o radeon_fb.o radeon_combios.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o

linux-core/radeon_cs.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/radeon_cs.c

View File

@ -27,6 +27,9 @@
#include "radeon_drm.h"
#include "radeon_drv.h"
static int radeon_gem_ib_init(struct drm_device *dev);
static int radeon_gem_ib_destroy(struct drm_device *dev);
int radeon_gem_init_object(struct drm_gem_object *obj)
{
struct drm_radeon_gem_object *obj_priv;
@ -513,19 +516,19 @@ static int radeon_gart_init(struct drm_device *dev)
ret = drm_buffer_object_create(dev, RADEON_PCIGART_TABLE_SIZE,
drm_bo_type_kernel,
DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
0, 1, 0, &dev_priv->mm.pcie_table);
0, 1, 0, &dev_priv->mm.pcie_table.bo);
if (ret)
return -EINVAL;
DRM_DEBUG("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table, dev_priv->mm.pcie_table->offset);
ret = drm_bo_kmap(dev_priv->mm.pcie_table, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT,
&dev_priv->mm.pcie_table_map);
DRM_DEBUG("pcie table bo created %p, %x\n", dev_priv->mm.pcie_table.bo, dev_priv->mm.pcie_table.bo->offset);
ret = drm_bo_kmap(dev_priv->mm.pcie_table.bo, 0, RADEON_PCIGART_TABLE_SIZE >> PAGE_SHIFT,
&dev_priv->mm.pcie_table.kmap);
if (ret)
return -EINVAL;
dev_priv->pcigart_offset_set = 2;
dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table->offset;
dev_priv->gart_info.addr = dev_priv->mm.pcie_table_map.virtual;
dev_priv->gart_info.bus_addr = dev_priv->fb_location + dev_priv->mm.pcie_table.bo->offset;
dev_priv->gart_info.addr = dev_priv->mm.pcie_table.kmap.virtual;
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB;
memset(dev_priv->gart_info.addr, 0, RADEON_PCIGART_TABLE_SIZE);
@ -566,14 +569,14 @@ int radeon_alloc_gart_objects(struct drm_device *dev)
drm_bo_type_kernel,
DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
0, 1, 0, &dev_priv->mm.ring);
0, 1, 0, &dev_priv->mm.ring.bo);
if (ret) {
DRM_ERROR("failed to allocate ring\n");
return -EINVAL;
}
ret = drm_bo_kmap(dev_priv->mm.ring, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT,
&dev_priv->mm.ring_map);
ret = drm_bo_kmap(dev_priv->mm.ring.bo, 0, RADEON_DEFAULT_RING_SIZE >> PAGE_SHIFT,
&dev_priv->mm.ring.kmap);
if (ret) {
DRM_ERROR("failed to map ring\n");
return -EINVAL;
@ -583,24 +586,26 @@ int radeon_alloc_gart_objects(struct drm_device *dev)
drm_bo_type_kernel,
DRM_BO_FLAG_WRITE |DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_NO_EVICT,
0, 1, 0, &dev_priv->mm.ring_read_ptr);
0, 1, 0, &dev_priv->mm.ring_read.bo);
if (ret) {
DRM_ERROR("failed to allocate ring read\n");
return -EINVAL;
}
ret = drm_bo_kmap(dev_priv->mm.ring_read_ptr, 0,
ret = drm_bo_kmap(dev_priv->mm.ring_read.bo, 0,
PAGE_SIZE >> PAGE_SHIFT,
&dev_priv->mm.ring_read_ptr_map);
&dev_priv->mm.ring_read.kmap);
if (ret) {
DRM_ERROR("failed to map ring read\n");
return -EINVAL;
}
DRM_DEBUG("Ring ptr %p mapped at %d %p, read ptr %p mapped at %d %p\n",
dev_priv->mm.ring, dev_priv->mm.ring->offset, dev_priv->mm.ring_map.virtual,
dev_priv->mm.ring_read_ptr, dev_priv->mm.ring_read_ptr->offset, dev_priv->mm.ring_read_ptr_map.virtual);
dev_priv->mm.ring.bo, dev_priv->mm.ring.bo->offset, dev_priv->mm.ring.kmap.virtual,
dev_priv->mm.ring_read.bo, dev_priv->mm.ring_read.bo->offset, dev_priv->mm.ring_read.kmap.virtual);
/* init the indirect buffers */
radeon_gem_ib_init(dev);
return 0;
}
@ -634,6 +639,8 @@ int radeon_gem_mm_init(struct drm_device *dev)
ret = radeon_alloc_gart_objects(dev);
if (ret)
return -EINVAL;
return 0;
}
@ -641,16 +648,19 @@ void radeon_gem_mm_fini(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
radeon_gem_ib_destroy(dev);
mutex_lock(&dev->struct_mutex);
if (dev_priv->mm.ring_read_ptr) {
drm_bo_kunmap(&dev_priv->mm.ring_read_ptr_map);
drm_bo_usage_deref_locked(&dev_priv->mm.ring_read_ptr);
if (dev_priv->mm.ring_read.bo) {
drm_bo_kunmap(&dev_priv->mm.ring_read.kmap);
drm_bo_usage_deref_locked(&dev_priv->mm.ring_read.bo);
}
if (dev_priv->mm.ring) {
drm_bo_kunmap(&dev_priv->mm.ring_map);
drm_bo_usage_deref_locked(&dev_priv->mm.ring);
if (dev_priv->mm.ring.bo) {
drm_bo_kunmap(&dev_priv->mm.ring.kmap);
drm_bo_usage_deref_locked(&dev_priv->mm.ring.bo);
}
if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
@ -658,9 +668,9 @@ void radeon_gem_mm_fini(struct drm_device *dev)
}
if (dev_priv->flags & RADEON_IS_PCIE) {
if (dev_priv->mm.pcie_table) {
drm_bo_kunmap(&dev_priv->mm.pcie_table_map);
drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table);
if (dev_priv->mm.pcie_table.bo) {
drm_bo_kunmap(&dev_priv->mm.pcie_table.kmap);
drm_bo_usage_deref_locked(&dev_priv->mm.pcie_table.bo);
}
}
@ -685,3 +695,158 @@ int radeon_gem_object_pin(struct drm_gem_object *obj,
return ret;
}
#define RADEON_IB_MEMORY (1*1024*1024)
#define RADEON_IB_SIZE (65536)
#define RADEON_NUM_IB (RADEON_IB_MEMORY / RADEON_IB_SIZE)
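These sizes give RADEON_NUM_IB = 1 MB / 64 KB = 16 indirect buffers, which easily fits in the 64-bit ib_alloc_bitmap used below. A hypothetical compile-time check (not part of this commit) that would keep the two in sync:

#if (RADEON_IB_MEMORY % RADEON_IB_SIZE) != 0 || RADEON_NUM_IB > 64
#error "IB pool must divide evenly and fit in the 64-bit ib_alloc_bitmap"
#endif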
int radeon_gem_ib_get(struct drm_device *dev, void **ib, uint32_t dwords)
{
int i, index = -1;
int ret;
drm_radeon_private_t *dev_priv = dev->dev_private;
for (i = 0; i < RADEON_NUM_IB; i++) {
if (!(dev_priv->ib_alloc_bitmap & (1 << i))){
index = i;
break;
}
}
/* if all are in use we need to wait */
if (index == -1) {
for (i = 0; i < RADEON_NUM_IB; i++) {
if (dev_priv->ib_alloc_bitmap & (1 << i)) {
mutex_lock(&dev_priv->ib_objs[i]->bo->mutex);
ret = drm_bo_wait(dev_priv->ib_objs[i]->bo, 0, 1, 0, 0);
mutex_unlock(&dev_priv->ib_objs[i]->bo->mutex);
if (ret)
continue;
dev_priv->ib_alloc_bitmap &= ~(1 << i);
index = i;
break;
}
}
}
if (index == -1) {
DRM_ERROR("failed to allocate IB from freelist %x\n", dev_priv->ib_alloc_bitmap);
return -EINVAL;
}
if (dwords > RADEON_IB_SIZE / sizeof(uint32_t))
return -EINVAL;
*ib = dev_priv->ib_objs[index]->kmap.virtual;
dev_priv->ib_alloc_bitmap |= (1 << i);
return 0;
}
static void radeon_gem_ib_free(struct drm_device *dev, void *ib, uint32_t dwords)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_fence_object *fence;
int ret;
int i;
RING_LOCALS;
for (i = 0; i < RADEON_NUM_IB; i++) {
if (dev_priv->ib_objs[i]->kmap.virtual == ib) {
ret = drm_bo_do_validate(dev_priv->ib_objs[i]->bo, 0,
DRM_BO_FLAG_NO_EVICT,
0, 0, NULL);
if (ret)
DRM_ERROR("Failed to validate\n");
DRM_DEBUG("validated IB %x, %d\n", dev_priv->ib_objs[i]->bo->offset, dwords);
BEGIN_RING(4);
OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
OUT_RING(dev_priv->gart_vm_start + dev_priv->ib_objs[i]->bo->offset);
OUT_RING(dwords);
OUT_RING(CP_PACKET2());
ADVANCE_RING();
COMMIT_RING();
/* emit a fence object */
ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
if (ret) {
drm_putback_buffer_objects(dev);
}
/* dereference the fence object */
if (fence)
drm_fence_usage_deref_unlocked(&fence);
}
}
}
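Note that despite its name, radeon_gem_ib_free() is what actually dispatches the buffer: it validates the IB, points the CP at it via RADEON_CP_IB_BASE, and attaches a fence, so a slot is only reused once that fence has signalled (see the wait loop in radeon_gem_ib_get()). A rough kernel-side sketch of driving the pool directly; illustrative only, the real caller is the CS ioctl going through the dev_priv->cs hooks:

/* illustrative only, not part of this commit */
static void radeon_ib_pool_example(struct drm_device *dev)
{
	uint32_t *ib;

	if (radeon_gem_ib_get(dev, (void **)&ib, 2))
		return;
	ib[0] = CP_PACKET2();	/* harmless type-2 NOP filler */
	ib[1] = CP_PACKET2();
	/* "free" schedules the IB on the CP ring and fences it; the slot is
	 * recycled once the fence signals */
	radeon_gem_ib_free(dev, ib, 2);
}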
static int radeon_gem_ib_destroy(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int i;
if (dev_priv->ib_objs) {
for (i = 0; i < RADEON_NUM_IB; i++) {
if (dev_priv->ib_objs[i]) {
drm_bo_kunmap(&dev_priv->ib_objs[i]->kmap);
drm_bo_usage_deref_unlocked(&dev_priv->ib_objs[i]->bo);
}
drm_free(dev_priv->ib_objs[i], sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
}
drm_free(dev_priv->ib_objs, RADEON_NUM_IB*sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
}
dev_priv->ib_objs = NULL;
return 0;
}
/* allocate 1MB of 64k IBs that the kernel can keep mapped */
static int radeon_gem_ib_init(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int i;
int ret;
dev_priv->ib_objs = drm_calloc(RADEON_NUM_IB, sizeof(struct radeon_mm_obj *), DRM_MEM_DRIVER);
if (!dev_priv->ib_objs)
goto free_all;
for (i = 0; i < RADEON_NUM_IB; i++) {
dev_priv->ib_objs[i] = drm_calloc(1, sizeof(struct radeon_mm_obj), DRM_MEM_DRIVER);
if (!dev_priv->ib_objs[i])
goto free_all;
ret = drm_buffer_object_create(dev, RADEON_IB_SIZE,
drm_bo_type_kernel,
DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_MAPPABLE, 0,
0, 0, &dev_priv->ib_objs[i]->bo);
if (ret)
goto free_all;
ret = drm_bo_kmap(dev_priv->ib_objs[i]->bo, 0, RADEON_IB_SIZE >> PAGE_SHIFT,
&dev_priv->ib_objs[i]->kmap);
if (ret)
goto free_all;
}
dev_priv->ib_alloc_bitmap = 0;
dev_priv->cs.ib_get = radeon_gem_ib_get;
dev_priv->cs.ib_free = radeon_gem_ib_free;
radeon_cs_init(dev);
return 0;
free_all:
radeon_gem_ib_destroy(dev);
return -ENOMEM;
}

View File

@ -3086,6 +3086,10 @@
# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
#define R300_CP_RESYNC_ADDR 0x778
#define R300_CP_RESYNC_DATA 0x77c
#define RADEON_CP_CSQ_STAT 0x07f8
# define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0)
# define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8)

View File

@ -658,8 +658,8 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
((dev_priv->gart_vm_start - 1) & 0xffff0000)
| (dev_priv->fb_location >> 16));
if (dev_priv->mm.ring) {
ring_start = dev_priv->mm.ring->offset +
if (dev_priv->mm.ring.bo) {
ring_start = dev_priv->mm.ring.bo->offset +
dev_priv->gart_vm_start;
} else
#if __OS_HAS_AGP
@ -692,9 +692,9 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
dev_priv->ring.tail = cur_read_ptr;
if (dev_priv->mm.ring_read_ptr) {
if (dev_priv->mm.ring_read.bo) {
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
dev_priv->mm.ring_read_ptr->offset +
dev_priv->mm.ring_read.bo->offset +
dev_priv->gart_vm_start);
} else
#if __OS_HAS_AGP
@ -745,9 +745,9 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
+ RADEON_SCRATCH_REG_OFFSET);
if (dev_priv->mm.ring_read_ptr)
if (dev_priv->mm.ring_read.bo)
dev_priv->scratch = ((__volatile__ u32 *)
dev_priv->mm.ring_read_ptr_map.virtual +
dev_priv->mm.ring_read.kmap.virtual +
(RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
else
dev_priv->scratch = ((__volatile__ u32 *)
@ -772,12 +772,18 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
radeon_do_wait_for_idle(dev_priv);
/* Sync everything up */
if (dev_priv->chip_family > CHIP_RV280) {
RADEON_WRITE(RADEON_ISYNC_CNTL,
(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI));
} else {
RADEON_WRITE(RADEON_ISYNC_CNTL,
(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI));
}
}
static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
@ -785,8 +791,8 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
u32 tmp;
void *ring_read_ptr;
if (dev_priv->mm.ring_read_ptr)
ring_read_ptr = dev_priv->mm.ring_read_ptr_map.virtual;
if (dev_priv->mm.ring_read.bo)
ring_read_ptr = dev_priv->mm.ring_read.kmap.virtual;
else
ring_read_ptr = dev_priv->ring_rptr->handle;
@ -2282,8 +2288,8 @@ int radeon_modeset_cp_init(struct drm_device *dev)
dev_priv->ring.size = RADEON_DEFAULT_RING_SIZE;
dev_priv->cp_mode = RADEON_CSQ_PRIBM_INDBM;
dev_priv->ring.start = (u32 *)(void *)(unsigned long)dev_priv->mm.ring_map.virtual;
dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring_map.virtual +
dev_priv->ring.start = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual;
dev_priv->ring.end = (u32 *)(void *)(unsigned long)dev_priv->mm.ring.kmap.virtual +
dev_priv->ring.size / sizeof(u32);
dev_priv->ring.size_l2qw = drm_order(dev_priv->ring.size / 8);
dev_priv->ring.rptr_update = 4096;
@ -2297,7 +2303,7 @@ int radeon_modeset_cp_init(struct drm_device *dev)
radeon_cp_load_microcode(dev_priv);
DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring->offset, dev_priv->mm.ring_read_ptr->offset);
DRM_DEBUG("ring offset is %x %x\n", dev_priv->mm.ring.bo->offset, dev_priv->mm.ring_read.bo->offset);
radeon_cp_init_ring_buffer(dev, dev_priv);

shared-core/radeon_cs.c Normal file
View File

@ -0,0 +1,181 @@
/*
* Copyright 2008 Jerome Glisse.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
{
struct drm_radeon_private *radeon = dev->dev_private;
struct drm_radeon_cs *cs = data;
uint32_t *packets = NULL;
uint32_t cs_id;
void *ib = NULL;
long size;
int r;
/* set command stream id to 0, which is a fake id */
cs_id = 0;
DRM_COPY_TO_USER(&cs->cs_id, &cs_id, sizeof(uint32_t));
if (radeon == NULL) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (!cs->dwords) {
return 0;
}
/* limit cs to a 64KB IB (16K dwords) */
if (cs->dwords > (16 * 1024)) {
return -EINVAL;
}
/* copy cs from userspace; maybe we should copy straight into the ib to
 * save one copy, but the ib will be mapped write-combined, which is not
 * good for command checking; something worth testing, I guess (Jerome)
 */
size = cs->dwords * sizeof(uint32_t);
packets = drm_alloc(size, DRM_MEM_DRIVER);
if (packets == NULL) {
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(packets, (void __user *)(unsigned long)cs->packets, size)) {
r = -EFAULT;
goto out;
}
/* get ib */
r = radeon->cs.ib_get(dev, &ib, cs->dwords);
if (r) {
goto out;
}
/* now parse command stream */
r = radeon->cs.parse(dev, ib, packets, cs->dwords);
if (r) {
goto out;
}
/* emit cs id sequence */
radeon->cs.id_emit(dev, &cs_id);
DRM_COPY_TO_USER(&cs->cs_id, &cs_id, sizeof(uint32_t));
out:
radeon->cs.ib_free(dev, ib, cs->dwords);
drm_free(packets, size, DRM_MEM_DRIVER);
return r;
}
int radeon_cs_parse(struct drm_device *dev, void *ib,
uint32_t *packets, uint32_t dwords)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
volatile int rb;
/* copy the packet into the IB */
memcpy(ib, packets, dwords * sizeof(uint32_t));
/* read back the last dword to flush WC buffers */
rb = readl((ib + (dwords-1) * sizeof(uint32_t)));
return 0;
}
uint32_t radeon_cs_id_get(struct drm_radeon_private *radeon)
{
/* FIXME: protect with a spinlock */
/* FIXME: check if wrap affect last reported wrap & sequence */
radeon->cs.id_scnt = (radeon->cs.id_scnt + 1) & 0x00FFFFFF;
if (!radeon->cs.id_scnt) {
/* increment wrap counter */
radeon->cs.id_wcnt += 0x01000000;
/* valid sequence numbers start at 1 */
radeon->cs.id_scnt = 1;
}
return (radeon->cs.id_scnt | radeon->cs.id_wcnt);
}
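The id therefore packs an 8-bit wrap count into bits 31:24 and a 24-bit sequence count into bits 23:0, with sequence 0 reserved as "never emitted". A small illustration (not in this commit) of how an id decomposes under those masks:

/* illustration only: split an id produced by radeon_cs_id_get() */
uint32_t id   = radeon_cs_id_get(dev_priv);
uint32_t wrap = id & 0xFF000000;	/* advances by 0x01000000 on each wrap */
uint32_t seq  = id & 0x00FFFFFF;	/* runs 1..0xFFFFFF; 0 is never handed out */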
void r100_cs_id_emit(struct drm_device *dev, uint32_t *id)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
/* ISYNC_CNTL should have CPSCRATCH bit set */
*id = radeon_cs_id_get(dev_priv);
/* emit id in SCRATCH4 (not used yet in old drm) */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG4, 0));
OUT_RING(*id);
ADVANCE_RING();
}
void r300_cs_id_emit(struct drm_device *dev, uint32_t *id)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
/* ISYNC_CNTL should not have CPSCRATCH bit set */
*id = radeon_cs_id_get(dev_priv);
/* emit id in SCRATCH6 */
BEGIN_RING(6);
OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 0));
OUT_RING(6);
OUT_RING(CP_PACKET0(R300_CP_RESYNC_DATA, 0));
OUT_RING(*id);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FINISH);
ADVANCE_RING();
}
uint32_t r100_cs_id_last_get(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
return RADEON_READ(RADEON_SCRATCH_REG4);
}
uint32_t r300_cs_id_last_get(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
return RADEON_READ(RADEON_SCRATCH_REG6);
}
int radeon_cs_init(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if (dev_priv->chip_family < CHIP_RV280) {
dev_priv->cs.id_emit = r100_cs_id_emit;
dev_priv->cs.id_last_get = r100_cs_id_last_get;
} else if (dev_priv->chip_family < CHIP_R600) {
dev_priv->cs.id_emit = r300_cs_id_emit;
dev_priv->cs.id_last_get = r300_cs_id_last_get;
}
dev_priv->cs.parse = radeon_cs_parse;
/* the ib get/free hooks depend on whether the memory manager is in use, so they are installed by the memory manager init code */
return 0;
}

View File

@ -506,6 +506,7 @@ typedef struct {
#define DRM_RADEON_GEM_SET_DOMAIN 0x23
#define DRM_RADEON_GEM_INDIRECT 0x24 // temporary for X server
#define DRM_RADEON_CS 0x25
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@ -545,6 +546,7 @@ typedef struct {
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
#define DRM_IOCTL_RADEON_GEM_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INDIRECT, struct drm_radeon_gem_indirect)
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
typedef struct drm_radeon_init {
@ -861,4 +863,17 @@ struct drm_radeon_gem_indirect {
uint32_t used;
};
/* New interface which obsoletes all previous interfaces.
*/
struct drm_radeon_cs {
// uint32_t __user *packets;
uint32_t dwords;
uint32_t cs_id;
uint64_t packets;
};
#endif
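A minimal userspace sketch of submitting through the new ioctl, assuming libdrm's drmCommandWriteRead() helper and an already-open DRM fd; the struct fields come from drm_radeon_cs above, everything else (the NOP payload, the helper name) is illustrative:

/* illustrative userspace sketch, not part of this commit */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "radeon_drm.h"

static int submit_nop_cs(int fd, uint32_t *cs_id_out)
{
	/* four CP type-2 (NOP) packets, just to have something harmless to submit */
	uint32_t buf[4] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
	struct drm_radeon_cs cs;
	int ret;

	memset(&cs, 0, sizeof(cs));
	cs.dwords  = 4;					/* kernel caps this at 16 * 1024 */
	cs.packets = (uint64_t)(unsigned long)buf;	/* user pointer carried as a u64 */

	ret = drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
	if (ret == 0)
		*cs_id_out = cs.cs_id;			/* sequence id written back by the kernel */
	return ret;
}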

View File

@ -195,11 +195,11 @@ enum radeon_mac_model {
#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
(dev_priv->mm.ring_read_ptr ? readl(dev_priv->mm.ring_read_ptr_map.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0 )) : \
(dev_priv->mm.ring_read.bo ? readl(dev_priv->mm.ring_read.kmap.virtual + 0) : DRM_READ32((dev_priv)->ring_rptr, 0 )) : \
RADEON_READ(RADEON_CP_RB_RPTR))
#define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read_ptr ? \
writel((val), dev_priv->mm.ring_read_ptr_map.virtual) : \
#define SET_RING_HEAD(dev_priv,val) (dev_priv->mm.ring_read.bo ? \
writel((val), dev_priv->mm.ring_read.kmap.virtual) : \
DRM_WRITE32((dev_priv)->ring_rptr, 0, (val)))
typedef struct drm_radeon_freelist {
@ -261,6 +261,11 @@ struct radeon_virt_surface {
struct drm_file *file_priv;
};
struct radeon_mm_obj {
struct drm_buffer_object *bo;
struct drm_bo_kmap_obj kmap;
};
struct radeon_mm_info {
uint64_t vram_offset; // Offset into GPU space
uint64_t vram_size;
@ -268,15 +273,10 @@ struct radeon_mm_info {
uint64_t gart_start;
uint64_t gart_size;
struct drm_buffer_object *pcie_table;
struct drm_bo_kmap_obj pcie_table_map;
struct drm_buffer_object *ring;
struct drm_bo_kmap_obj ring_map;
struct drm_buffer_object *ring_read_ptr;
struct drm_bo_kmap_obj ring_read_ptr_map;
struct radeon_mm_obj pcie_table;
struct radeon_mm_obj ring;
struct radeon_mm_obj ring_read;
};
#include "radeon_mode.h"
@ -286,6 +286,25 @@ struct drm_radeon_master_private {
drm_radeon_sarea_t *sarea_priv;
};
/* command submission struct */
struct drm_radeon_cs_priv {
uint32_t id_wcnt;
uint32_t id_scnt;
uint32_t id_last_wcnt;
uint32_t id_last_scnt;
int (*parse)(struct drm_device *dev, void *ib,
uint32_t *packets, uint32_t dwords);
void (*id_emit)(struct drm_device *dev, uint32_t *id);
uint32_t (*id_last_get)(struct drm_device *dev);
/* these ib handling callbacks hide the memory-manager drm from the
 * memory-manager-less drm; free has to emit the ib discard
 * sequence into the ring */
int (*ib_get)(struct drm_device *dev, void **ib, uint32_t dwords);
uint32_t (*ib_get_ptr)(struct drm_device *dev, void *ib);
void (*ib_free)(struct drm_device *dev, void *ib, uint32_t dwords);
};
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
@ -392,7 +411,11 @@ typedef struct drm_radeon_private {
u32 ram_width;
enum radeon_pll_errata pll_errata;
struct radeon_mm_obj **ib_objs;
/* ib bitmap */
uint64_t ib_alloc_bitmap; // TODO: replace with a real bitmap
struct drm_radeon_cs_priv cs;
} drm_radeon_private_t;
typedef struct drm_radeon_buf_priv {
@ -669,14 +692,15 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
#define RADEON_SCRATCH_REG3 0x15ec
#define RADEON_SCRATCH_REG4 0x15f0
#define RADEON_SCRATCH_REG5 0x15f4
#define RADEON_SCRATCH_REG6 0x15f8
#define RADEON_SCRATCH_UMSK 0x0770
#define RADEON_SCRATCH_ADDR 0x0774
#define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x))
#define GET_SCRATCH( x ) (dev_priv->writeback_works ? \
(dev_priv->mm.ring_read_ptr ? \
readl(dev_priv->mm.ring_read_ptr_map.virtual + RADEON_SCRATCHOFF(0)) : \
(dev_priv->mm.ring_read.bo ? \
readl(dev_priv->mm.ring_read.kmap.virtual + RADEON_SCRATCHOFF(0)) : \
DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(x))) : \
RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x)))
@ -1593,4 +1617,6 @@ extern void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on);
extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
extern void radeon_cp_dispatch_flip(struct drm_device * dev, struct drm_master *master);
extern int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
extern int radeon_cs_init(struct drm_device *dev);
#endif /* __RADEON_DRV_H__ */

View File

@ -3279,6 +3279,7 @@ struct drm_ioctl_desc radeon_ioctls[] = {
DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_RADEON_GEM_INDIRECT, radeon_gem_indirect_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
};
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);