Bug 6242: [mach64] Use private DMA buffers, part #4.

mach64_state.c: convert the DRM_MACH64_BLIT ioctl to submit a pointer to
user-space memory rather than a DMA buffer index, similar to DRM_MACH64_VERTEX.

This change allows the DDX to map the DMA buffers read-only and eliminates a
security problem where a client could alter the contents of a DMA buffer after
submitting it to the DRM.

This change also affects the DRI/DRM interface. Performance-wise, it basically
affects PCI mode, where I get a ~12% speedup for some Mesa demos I tested.
This is mainly due to eliminating an ioctl for allocating the DMA buffer.
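
For illustration only (this sketch is not part of the commit and not the actual
Mesa/DDX code; fd, pixels and the other parameters are placeholders, and the
header name in the include is assumed), the client side of the new interface
fills drm_mach64_blit_t with a pointer to its own memory and lets the DRM copy
the data into a private buffer, instead of first reserving a kernel DMA buffer
and passing its index:

    #include <xf86drm.h>
    #include "mach64_drm.h"   /* for drm_mach64_blit_t and DRM_MACH64_BLIT */

    /* Hypothetical helper: submit a blit from client memory.  With the old
     * interface the client had to obtain a DMA buffer via an extra ioctl and
     * pass its index in the (now removed) idx field. */
    static int submit_blit(int fd, void *pixels, int pitch, int dst_offset,
                           int format, int x, int y, int width, int height)
    {
        drm_mach64_blit_t blit;

        blit.buf = pixels;      /* pointer to user-space memory, was "int idx" */
        blit.pitch = pitch;
        blit.offset = dst_offset;
        blit.format = format;
        blit.x = x;
        blit.y = y;
        blit.width = width;
        blit.height = height;

        return drmCommandWrite(fd, DRM_MACH64_BLIT, &blit, sizeof(blit));
    }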

mach64_dma.c: move the responsibility for allocating memory for the DMA ring
in PCI mode to the DDX.

This change affects the DDX/DRM interface and unifies a couple of PCI/AGP code
paths for ring memory in the DRM.
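
As a rough sketch of the DDX side of this (assumed code, not taken from this
commit or from the actual X driver), in PCI mode the DDX now allocates the 16KB
descriptor ring itself, e.g. as a consistent PCI mapping, and passes the
resulting map offset to the DRM in the init ioctl's ring_offset field, which
mach64_do_dma_init() then resolves with drm_core_findmap() on both the PCI and
AGP paths:

    #include <xf86drm.h>

    /* Hypothetical DDX-side helper: allocate the descriptor ring in PCI mode.
     * 0x4000 matches the ring size used in mach64_dma.c. */
    static int alloc_pci_ring(int fd, drm_handle_t *ring_handle)
    {
        /* The returned handle/offset is what the DDX would place in
         * drm_mach64_init_t::ring_offset before the DMA init ioctl. */
        return drmAddMap(fd, 0, 0x4000, DRM_CONSISTENT, 0, ring_handle);
    }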

Bump the mach64 DRM version major and date.
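
Because of the major bump (1 -> 2), old and new user space are not
interchangeable; below is a sketch of the kind of guard user space presumably
needs before using the pointer-based submission path (illustrative only, not
from this commit):

    #include <xf86drm.h>

    /* Hypothetical check: the pointer-based VERTEX/BLIT submission requires
     * mach64 DRM major version 2 or later. */
    static int mach64_has_private_buffers(int fd)
    {
        drmVersionPtr ver = drmGetVersion(fd);
        int ok = (ver != NULL && ver->version_major >= 2);

        if (ver != NULL)
            drmFreeVersion(ver);
        return ok;
    }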

Author: George Sapountzis
Date:   2006-10-02 06:13:38 +03:00
Branch: main
Parent: f3deef730d
Commit: c9e3aa961e

4 changed files with 62 additions and 79 deletions

mach64_dma.c

@@ -815,10 +815,6 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
return DRM_ERR(EINVAL);
}
dev_priv->sarea_priv = (drm_mach64_sarea_t *)
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
if (!dev_priv->is_pci) {
dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->ring_map) {
DRM_ERROR("can not find ring map!\n");
@@ -826,6 +822,11 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
mach64_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
dev_priv->sarea_priv = (drm_mach64_sarea_t *)
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
if (!dev_priv->is_pci) {
drm_core_ioremap(dev_priv->ring_map, dev);
if (!dev_priv->ring_map->handle) {
DRM_ERROR("can not ioremap virtual address for"
@@ -891,27 +892,9 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
}
}
/* allocate descriptor memory from pci pool */
DRM_DEBUG("Allocating dma descriptor ring\n");
dev_priv->ring.size = 0x4000; /* 16KB */
if (dev_priv->is_pci) {
dev_priv->ring.dmah = drm_pci_alloc(dev, dev_priv->ring.size,
dev_priv->ring.size,
0xfffffffful);
if (!dev_priv->ring.dmah) {
DRM_ERROR("Allocating dma descriptor ring failed\n");
return DRM_ERR(ENOMEM);
} else {
dev_priv->ring.start = dev_priv->ring.dmah->vaddr;
dev_priv->ring.start_addr =
(u32) dev_priv->ring.dmah->busaddr;
}
} else {
dev_priv->ring.start = dev_priv->ring_map->handle;
dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
}
memset(dev_priv->ring.start, 0, dev_priv->ring.size);
DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
@@ -1149,19 +1132,15 @@ int mach64_do_cleanup_dma(drm_device_t * dev)
if (dev->dev_private) {
drm_mach64_private_t *dev_priv = dev->dev_private;
if (dev_priv->is_pci) {
if (dev_priv->ring.dmah) {
drm_pci_free(dev, dev_priv->ring.dmah);
}
} else {
if (!dev_priv->is_pci) {
if (dev_priv->ring_map)
drm_core_ioremapfree(dev_priv->ring_map, dev);
}
if (dev->agp_buffer_map) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
}
}
mach64_destroy_freelist(dev);

mach64_drm.h

@@ -237,7 +237,7 @@ typedef struct drm_mach64_vertex {
} drm_mach64_vertex_t;
typedef struct drm_mach64_blit {
int idx;
void *buf;
int pitch;
int offset;
int format;

mach64_drv.h

@@ -42,9 +42,9 @@
#define DRIVER_NAME "mach64"
#define DRIVER_DESC "DRM module for the ATI Rage Pro"
#define DRIVER_DATE "20020904"
#define DRIVER_DATE "20060718"
#define DRIVER_MAJOR 1
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -61,7 +61,6 @@ typedef struct drm_mach64_freelist {
} drm_mach64_freelist_t;
typedef struct drm_mach64_descriptor_ring {
drm_dma_handle_t *dmah; /* Handle to pci dma memory */
void *start; /* write pointer (cpu address) to start of descriptor ring */
u32 start_addr; /* bus address of beginning of descriptor ring */
int size; /* size of ring in bytes */

mach64_state.c

@@ -480,7 +480,7 @@ static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)
/* Copy and verify a client submited buffer.
* FIXME: Make an assembly optimized version
*/
static __inline__ int copy_and_verify_from_user(u32 *to,
static __inline__ int copy_from_user_vertex(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
{
@@ -489,7 +489,7 @@ static __inline__ int copy_and_verify_from_user(u32 *to,
from = drm_alloc(bytes, DRM_MEM_DRIVER);
if (from == NULL)
return ENOMEM;
return DRM_ERR(ENOMEM);
if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
drm_free(from, bytes, DRM_MEM_DRIVER);
@@ -571,7 +571,7 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
return DRM_ERR(EAGAIN);
}
verify_ret = copy_and_verify_from_user(GETBUFPTR(copy_buf), buf, used);
verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);
if (verify_ret != 0) {
mach64_freelist_put(dev_priv, copy_buf);
@@ -627,13 +627,27 @@ _vertex_done:
return verify_ret;
}
static __inline__ int copy_from_user_blit(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
{
to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);
if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
return DRM_ERR(EFAULT);
}
return 0;
}
static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
drm_mach64_blit_t * blit)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
int dword_shift, dwords;
drm_buf_t *buf;
unsigned long used;
drm_buf_t *copy_buf;
int verify_ret = 0;
DMALOCALS;
/* The compiler won't optimize away a division by a variable,
@@ -660,34 +674,34 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
return DRM_ERR(EINVAL);
}
/* Dispatch the blit buffer.
*/
buf = dma->buflist[blit->idx];
if (buf->filp != filp) {
DRM_ERROR("process %d (filp %p) using buffer with filp %p\n",
DRM_CURRENTPID, filp, buf->filp);
return DRM_ERR(EINVAL);
}
if (buf->pending) {
DRM_ERROR("sending pending buffer %d\n", blit->idx);
return DRM_ERR(EINVAL);
}
/* Set buf->used to the bytes of blit data based on the blit dimensions
* and verify the size. When the setup is emitted to the buffer with
* the DMA* macros below, buf->used is incremented to include the bytes
* used for setup as well as the blit data.
*/
dwords = (blit->width * blit->height) >> dword_shift;
buf->used = dwords << 2;
if (buf->used <= 0 ||
buf->used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
DRM_ERROR("Invalid blit size: %d bytes\n", buf->used);
used = dwords << 2;
if (used <= 0 ||
used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
DRM_ERROR("Invalid blit size: %lu bytes\n", used);
return DRM_ERR(EINVAL);
}
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__);
return DRM_ERR(EAGAIN);
}
verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);
if (verify_ret != 0) {
mach64_freelist_put(dev_priv, copy_buf);
goto _blit_done;
}
copy_buf->used = used;
/* FIXME: Use a last buffer flag and reduce the state emitted for subsequent,
* continuation buffers?
*/
@@ -696,7 +710,7 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
* a register command every 16 dwords. State setup is added at the start of the
* buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET
*/
DMASETPTR(buf);
DMASETPTR(copy_buf);
DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
@@ -726,12 +740,13 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);
DRM_DEBUG("%s: %d bytes\n", __FUNCTION__, buf->used);
DRM_DEBUG("%s: %lu bytes\n", __FUNCTION__, used);
/* Add the buffer to the queue */
DMAADVANCEHOSTDATA(dev_priv);
return 0;
_blit_done:
return verify_ret;
}
/* ================================================================
@@ -829,7 +844,6 @@ int mach64_dma_vertex(DRM_IOCTL_ARGS)
int mach64_dma_blit(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_blit_t blit;
@@ -840,15 +854,6 @@ int mach64_dma_blit(DRM_IOCTL_ARGS)
DRM_COPY_FROM_USER_IOCTL(blit, (drm_mach64_blit_t *) data,
sizeof(blit));
DRM_DEBUG("%s: pid=%d index=%d\n",
__FUNCTION__, DRM_CURRENTPID, blit.idx);
if (blit.idx < 0 || blit.idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
blit.idx, dma->buf_count - 1);
return DRM_ERR(EINVAL);
}
ret = mach64_dma_dispatch_blit(filp, dev, &blit);
/* Make sure we restore the 3D state next time.