Import of XFree86 3.9.18

main
Kevin E Martin 2000-02-22 15:43:59 +00:00
parent 9a1197da5c
commit 7a9b291ab5
34 changed files with 9244 additions and 49 deletions

View File

@ -9,7 +9,7 @@
# Note 2! The CFLAGS definitions are now inherited from the
# parent makes..
#
# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.2 1999/09/27 14:59:24 dawes Exp $
# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.5 2000/02/14 06:27:25 martin Exp $
L_TARGET := libdrm.a
@ -22,7 +22,14 @@ ifdef CONFIG_DRM_GAMMA
M_OBJS += gamma.o
endif
ifdef CONFIG_DRM_TDFX
M_OBJS += tdfx.o
endif
include $(TOPDIR)/Rules.make
gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET)
$(LD) $(LD_RFLAG) -r -o $@ gamma_drv.o gamma_dma.o -L. -ldrm
tdfx.o: tdfx_drv.o tdfx_context.o $(L_TARGET)
$(LD) $(LD_RFLAG) -r -o $@ tdfx_drv.o tdfx_context.o -L. -ldrm

762
linux-core/i810_dma.c Normal file
View File

@ -0,0 +1,762 @@
/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_dma.c,v 1.1 2000/02/11 17:26:04 dawes Exp $
*
*/
/* Register-access scaffolding for the i810.
 * NOTE(review): I810_REG() is hard-coded to map index 2, so every
 * register lookup resolves through dev->maplist[2] -- presumably the
 * MMIO register map set up by the X server; confirm the index against
 * the map-creation order before relying on these macros. */
#define __NO_VERSION__
#include "drmP.h"
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#define I810_REG(reg) 2
/* Kernel virtual base of the register map, taken from the maplist. */
#define I810_BASE(reg) ((unsigned long) \
dev->maplist[I810_REG(reg)]->handle)
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
/* Volatile dereference so the compiler never caches MMIO accesses. */
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)
#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
/* Stub: one-time DMA engine initialization.  Currently only logs that it
 * was called; real hardware setup has not been written yet. */
void i810_dma_init(drm_device_t *dev)
{
printk(KERN_INFO "i810_dma_init\n");
}
/* Stub: DMA engine teardown, the counterpart of i810_dma_init().
 * Currently only logs; no resources are released here yet. */
void i810_dma_cleanup(drm_device_t *dev)
{
printk(KERN_INFO "i810_dma_cleanup\n");
}
/* Stub: hand one buffer (address/length) to the hardware for transfer.
 * Currently only logs -- no DMA is actually programmed, so callers'
 * bookkeeping runs against a no-op engine. */
static inline void i810_dma_dispatch(drm_device_t *dev, unsigned long address,
unsigned long length)
{
printk(KERN_INFO "i810_dma_dispatch\n");
}
/* Stub: intended to wait until the DMA engine is idle.  Body is empty,
 * so "quiescent" is currently assumed unconditionally. */
static inline void i810_dma_quiescent(drm_device_t *dev)
{
}
/* Drain the engine (no-op for now) and log that DMA is ready. */
static inline void i810_dma_ready(drm_device_t *dev)
{
i810_dma_quiescent(dev);
printk(KERN_INFO "i810_dma_ready\n");
}
/* Report whether the engine can accept a new buffer.  With the empty
 * quiescent stub above, this always returns 1 (ready). */
static inline int i810_dma_is_ready(drm_device_t *dev)
{
i810_dma_quiescent(dev);
printk(KERN_INFO "i810_dma_is_ready\n");
return 1;
}
/* Interrupt service routine.  On a completion interrupt: free the buffer
 * whose DMA just finished (dma->this_buffer) and schedule the next
 * dispatch from the immediate task queue (bottom half).  Bit 0 of
 * dev->dma_flag is the "DMA path busy" latch shared with i810_do_dma(). */
static void i810_dma_service(int irq, void *device, struct pt_regs *regs)
{
drm_device_t *dev = (drm_device_t *)device;
drm_device_dma_t *dma = dev->dma;
atomic_inc(&dev->total_irq);
if (i810_dma_is_ready(dev)) {
/* Free previous buffer */
if (test_and_set_bit(0, &dev->dma_flag)) {
/* DMA path already busy: count the miss and bail.
 * NOTE(review): this early return also skips queueing
 * the bottom half below -- confirm that is intended. */
atomic_inc(&dma->total_missed_free);
return;
}
if (dma->this_buffer) {
drm_free_buffer(dev, dma->this_buffer);
dma->this_buffer = NULL;
}
clear_bit(0, &dev->dma_flag);
/* Dispatch new buffer */
queue_task(&dev->tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
}
}
/* Only called by i810_dma_schedule. */
/* Take dma->next_buffer and dispatch it to the hardware.
 *
 * `locked` is nonzero when the caller already holds the hardware lock.
 * Returns 0 on success, -EBUSY when the DMA path/lock/engine is busy or
 * a context switch was initiated, -EINVAL when there is no usable
 * buffer.  Bit 0 of dev->dma_flag latches the whole routine against
 * concurrent entry (including from the interrupt handler above). */
static int i810_do_dma(drm_device_t *dev, int locked)
{
unsigned long address;
unsigned long length;
drm_buf_t *buf;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
cycles_t dma_start, dma_stop;
#endif
if (test_and_set_bit(0, &dev->dma_flag)) {
atomic_inc(&dma->total_missed_dma);
return -EBUSY;
}
#if DRM_DMA_HISTOGRAM
dma_start = get_cycles();
#endif
if (!dma->next_buffer) {
DRM_ERROR("No next_buffer\n");
clear_bit(0, &dev->dma_flag);
return -EINVAL;
}
buf = dma->next_buffer;
address = (unsigned long)buf->bus_address;
length = buf->used;
DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
buf->context, buf->idx, length);
/* Buffer was reclaimed (owner exited): drop it without dispatching. */
if (buf->list == DRM_LIST_RECLAIM) {
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return -EINVAL;
}
if (!length) {
DRM_ERROR("0 length buffer\n");
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return 0;
}
if (!i810_dma_is_ready(dev)) {
clear_bit(0, &dev->dma_flag);
return -EBUSY;
}
/* Either verify the submitter still holds the lock ("while locked"
 * buffers), or take the kernel-context lock ourselves. */
if (buf->while_locked) {
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Dispatching buffer %d from pid %d"
" \"while locked\", but no lock held\n",
buf->idx, buf->pid);
}
} else {
if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
atomic_inc(&dma->total_missed_lock);
clear_bit(0, &dev->dma_flag);
return -EBUSY;
}
}
if (dev->last_context != buf->context
&& !(dev->queuelist[buf->context]->flags
& _DRM_CONTEXT_PRESERVED)) {
/* PRE: dev->last_context != buf->context */
/* NOTE(review): retcode/goto below run whether or not
 * drm_context_switch() succeeded; only on failure is the
 * buffer dropped first. */
if (drm_context_switch(dev, dev->last_context, buf->context)) {
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
}
retcode = -EBUSY;
goto cleanup;
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == buf->context.
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
}
drm_clear_next_buffer(dev);
buf->pending = 1;
buf->waiting = 0;
buf->list = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
buf->time_dispatched = get_cycles();
#endif
i810_dma_dispatch(dev, address, length);
/* Previous in-flight buffer is done; this one becomes in-flight. */
drm_free_buffer(dev, dma->this_buffer);
dma->this_buffer = buf;
atomic_add(length, &dma->total_bytes);
atomic_inc(&dma->total_dmas);
if (!buf->while_locked && !dev->context_flag && !locked) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
cleanup:
clear_bit(0, &dev->dma_flag);
#if DRM_DMA_HISTOGRAM
dma_stop = get_cycles();
atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]);
#endif
return retcode;
}
/* Timer callback adapter: timers pass an unsigned long, so convert it
 * back to the device pointer and run the scheduler (not pre-locked). */
static void i810_dma_schedule_timer_wrapper(unsigned long dev)
{
i810_dma_schedule((drm_device_t *)dev, 0);
}
/* Task-queue (bottom half) adapter for the scheduler; see dev->tq setup
 * in i810_irq_install(). */
static void i810_dma_schedule_tq_wrapper(void *dev)
{
i810_dma_schedule(dev, 0);
}
/* DMA scheduler: pick the next buffer (either a previously selected but
 * unsent one, or a fresh one via drm_select_queue) and dispatch it with
 * i810_do_dma().  Retries up to 20 passes while work keeps arriving.
 * Bit 0 of dev->interrupt_flag guards against reentry.  Returns the
 * last i810_do_dma() result, or -EBUSY if already running / a context
 * switch is pending. */
int i810_dma_schedule(drm_device_t *dev, int locked)
{
int next;
drm_queue_t *q;
drm_buf_t *buf;
int retcode = 0;
int processed = 0;
int missed;
int expire = 20;
drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
cycles_t schedule_start;
#endif
if (test_and_set_bit(0, &dev->interrupt_flag)) {
/* Not reentrant */
atomic_inc(&dma->total_missed_sched);
return -EBUSY;
}
/* Snapshot so we can detect schedules missed while we run. */
missed = atomic_read(&dma->total_missed_sched);
#if DRM_DMA_HISTOGRAM
schedule_start = get_cycles();
#endif
again:
if (dev->context_flag) {
clear_bit(0, &dev->interrupt_flag);
return -EBUSY;
}
if (dma->next_buffer) {
/* Unsent buffer that was previously
selected, but that couldn't be sent
because the lock could not be obtained
or the DMA engine wasn't ready. Try
again. */
atomic_inc(&dma->total_tried);
if (!(retcode = i810_do_dma(dev, locked))) {
atomic_inc(&dma->total_hit);
++processed;
}
} else {
/* Select a queue; skip reclaimed buffers until a live one
 * (or no queue) is found. */
do {
next = drm_select_queue(dev,
i810_dma_schedule_timer_wrapper);
if (next >= 0) {
q = dev->queuelist[next];
buf = drm_waitlist_get(&q->waitlist);
dma->next_buffer = buf;
dma->next_queue = q;
if (buf && buf->list == DRM_LIST_RECLAIM) {
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
}
}
} while (next >= 0 && !dma->next_buffer);
if (dma->next_buffer) {
if (!(retcode = i810_do_dma(dev, locked))) {
++processed;
}
}
}
/* Loop while new work appeared or we made progress, bounded by
 * `expire` passes to avoid starving the caller. */
if (--expire) {
if (missed != atomic_read(&dma->total_missed_sched)) {
atomic_inc(&dma->total_lost);
if (i810_dma_is_ready(dev)) goto again;
}
if (processed && i810_dma_is_ready(dev)) {
atomic_inc(&dma->total_lost);
processed = 0;
goto again;
}
}
clear_bit(0, &dev->interrupt_flag);
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles()
- schedule_start)]);
#endif
return retcode;
}
/* Priority (synchronous) DMA path: dispatch the buffers named in `d`
 * immediately, bypassing the scheduler queues.  Interrupt-driven
 * scheduling is disabled for the duration (dev->interrupt_flag), and
 * unless _DRM_DMA_WHILE_LOCKED is set the kernel-context hardware lock
 * is taken first.  Returns 0 on success or a negative errno.
 * NOTE(review): drm_lock_free() is called once per dispatched buffer
 * inside the loop even though the lock is taken only once before it --
 * confirm against the drm lock semantics. */
static int i810_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
unsigned long address;
unsigned long length;
int must_free = 0;
int retcode = 0;
int i;
int idx;
drm_buf_t *buf;
drm_buf_t *last_buf = NULL;
drm_device_dma_t *dma = dev->dma;
DECLARE_WAITQUEUE(entry, current);
/* Turn off interrupt handling */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) return -EINTR;
}
if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
/* Busy-wait (yielding) for the hardware lock. */
while (!drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
}
++must_free;
}
atomic_inc(&dma->total_prio);
for (i = 0; i < d->send_count; i++) {
idx = d->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
d->send_indices[i], dma->buf_count - 1);
continue;
}
buf = dma->buflist[ idx ];
/* Reject buffers not owned by the caller or already on a list. */
if (buf->pid != current->pid) {
DRM_ERROR("Process %d using buffer owned by %d\n",
current->pid, buf->pid);
retcode = -EINVAL;
goto cleanup;
}
if (buf->list != DRM_LIST_NONE) {
DRM_ERROR("Process %d using %d's buffer on list %d\n",
current->pid, buf->pid, buf->list);
retcode = -EINVAL;
goto cleanup;
}
/* This isn't a race condition on
buf->list, since our concern is the
buffer reclaim during the time the
process closes the /dev/drm? handle, so
it can't also be doing DMA. */
buf->list = DRM_LIST_PRIO;
buf->used = d->send_sizes[i];
buf->context = d->context;
buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
address = (unsigned long)buf->address;
length = buf->used;
if (!length) {
DRM_ERROR("0 length buffer\n");
}
if (buf->pending) {
DRM_ERROR("Sending pending buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
retcode = -EINVAL;
goto cleanup;
}
if (buf->waiting) {
DRM_ERROR("Sending waiting buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
retcode = -EINVAL;
goto cleanup;
}
buf->pending = 1;
/* If the buffer belongs to a different context, sleep until
 * the context switch completes before dispatching. */
if (dev->last_context != buf->context
&& !(dev->queuelist[buf->context]->flags
& _DRM_CONTEXT_PRESERVED)) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != buf->context */
drm_context_switch(dev, dev->last_context,
buf->context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == buf->context.
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
retcode = -EINTR;
goto cleanup;
}
if (dev->last_context != buf->context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context,
buf->context);
}
}
#if DRM_DMA_HISTOGRAM
buf->time_queued = get_cycles();
buf->time_dispatched = buf->time_queued;
#endif
i810_dma_dispatch(dev, address, length);
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
atomic_add(length, &dma->total_bytes);
atomic_inc(&dma->total_dmas);
/* Free the previously dispatched buffer now that the next one
 * has been handed to the hardware. */
if (last_buf) {
drm_free_buffer(dev, last_buf);
}
last_buf = buf;
}
cleanup:
if (last_buf) {
i810_dma_ready(dev);
drm_free_buffer(dev, last_buf);
}
if (must_free && !dev->context_flag) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
clear_bit(0, &dev->interrupt_flag);
return retcode;
}
/* Queued DMA path: enqueue the buffers described by `d` and kick the
 * scheduler.  When _DRM_DMA_BLOCK is set, sleep (interruptibly) until
 * the last submitted buffer has completed.  Returns 0 or -errno. */
static int i810_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
DECLARE_WAITQUEUE(entry, current);
drm_buf_t *last_buf = NULL;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
if (d->flags & _DRM_DMA_BLOCK) {
/* Register on the last buffer's wait queue before enqueueing
 * so its completion wakeup cannot be missed. */
last_buf = dma->buflist[d->send_indices[d->send_count-1]];
add_wait_queue(&last_buf->dma_wait, &entry);
}
if ((retcode = drm_dma_enqueue(dev, d))) {
if (d->flags & _DRM_DMA_BLOCK)
remove_wait_queue(&last_buf->dma_wait, &entry);
return retcode;
}
i810_dma_schedule(dev, 0);
if (d->flags & _DRM_DMA_BLOCK) {
DRM_DEBUG("%d waiting\n", current->pid);
current->state = TASK_INTERRUPTIBLE;
for (;;) {
if (!last_buf->waiting
&& !last_buf->pending)
break; /* finished */
schedule();
if (signal_pending(current)) {
retcode = -EINTR; /* Can't restart */
break;
}
}
current->state = TASK_RUNNING;
DRM_DEBUG("%d running\n", current->pid);
remove_wait_queue(&last_buf->dma_wait, &entry);
/* Free the buffer only if nobody else is still waiting on it. */
if (!retcode
|| (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
if (!waitqueue_active(&last_buf->dma_wait)) {
drm_free_buffer(dev, last_buf);
}
}
if (retcode) {
DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
d->context,
last_buf->waiting,
last_buf->pending,
DRM_WAITCOUNT(dev, d->context),
last_buf->idx,
last_buf->list,
last_buf->pid,
current->pid);
}
}
return retcode;
}
/* DRM_IOCTL_DMA handler: validate the request, send any buffers, then
 * satisfy any buffer-allocation request via drm_dma_get_buffers().
 * NOTE(review): the scheduler/priority choice is #if 0'd out and the
 * priority path is taken unconditionally -- debug scaffolding, like the
 * bare printk()s here. */
int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
drm_dma_t d;
printk("i810_dma start\n");
copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
DRM_DEBUG("%d %d: %d send, %d req\n",
current->pid, d.context, d.send_count, d.request_count);
/* User processes may not submit on the kernel context. */
if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) {
DRM_ERROR("Process %d using context %d\n",
current->pid, d.context);
return -EINVAL;
}
if (d.send_count < 0 || d.send_count > dma->buf_count) {
DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
current->pid, d.send_count, dma->buf_count);
return -EINVAL;
}
if (d.request_count < 0 || d.request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
current->pid, d.request_count, dma->buf_count);
return -EINVAL;
}
if (d.send_count) {
#if 0
if (d.flags & _DRM_DMA_PRIORITY)
retcode = i810_dma_priority(dev, &d);
else
retcode = i810_dma_send_buffers(dev, &d);
#endif
printk("i810_dma priority\n");
retcode = i810_dma_priority(dev, &d);
}
d.granted_count = 0;
if (!retcode && d.request_count) {
retcode = drm_dma_get_buffers(dev, &d);
}
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
printk("i810_dma end (granted)\n");
return retcode;
}
/* Claim `irq` for the device and install i810_dma_service() as its
 * handler.  Also resets the DMA bookkeeping and points dev->tq at the
 * scheduler wrapper so the ISR can defer dispatch to a bottom half.
 * Returns 0, -EINVAL for irq 0, -EBUSY if an IRQ is already installed,
 * or the request_irq() error. */
int i810_irq_install(drm_device_t *dev, int irq)
{
int retcode;
if (!irq) return -EINVAL;
down(&dev->struct_sem);
if (dev->irq) {
up(&dev->struct_sem);
return -EBUSY;
}
dev->irq = irq;
up(&dev->struct_sem);
DRM_DEBUG("%d\n", irq);
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->dma->next_buffer = NULL;
dev->dma->next_queue = NULL;
dev->dma->this_buffer = NULL;
dev->tq.next = NULL;
dev->tq.sync = 0;
dev->tq.routine = i810_dma_schedule_tq_wrapper;
dev->tq.data = dev;
/* Before installing handler */
/* TODO */
/* Install handler */
if ((retcode = request_irq(dev->irq,
i810_dma_service,
0,
dev->devname,
dev))) {
/* request_irq failed: release our claim on dev->irq. */
down(&dev->struct_sem);
dev->irq = 0;
up(&dev->struct_sem);
return retcode;
}
/* After installing handler */
/* TODO */
return 0;
}
/* Release the device IRQ installed by i810_irq_install().  dev->irq is
 * cleared under struct_sem before free_irq() so a concurrent install
 * sees a consistent state.  Returns -EINVAL if no IRQ was installed. */
int i810_irq_uninstall(drm_device_t *dev)
{
int irq;
down(&dev->struct_sem);
irq = dev->irq;
dev->irq = 0;
up(&dev->struct_sem);
if (!irq) return -EINVAL;
DRM_DEBUG("%d\n", irq);
/* TODO : Disable interrupts */
free_irq(irq, dev);
return 0;
}
/* DRM_IOCTL_CONTROL handler: install or uninstall the interrupt handler
 * on behalf of user space.
 * NOTE(review): i810_dma_init() (a logging stub) runs on every call
 * here -- debug scaffolding, not per-ioctl initialization. */
int i810_control(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_control_t ctl;
int retcode;
printk(KERN_INFO "i810_control\n");
i810_dma_init(dev);
copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
switch (ctl.func) {
case DRM_INST_HANDLER:
if ((retcode = i810_irq_install(dev, ctl.irq)))
return retcode;
break;
case DRM_UNINST_HANDLER:
if ((retcode = i810_irq_uninstall(dev)))
return retcode;
break;
default:
return -EINVAL;
}
return 0;
}
/* DRM_IOCTL_LOCK handler: block (interruptibly) until the hardware lock
 * can be taken for the caller's context, honoring the requested flush
 * flags, and optionally drain the DMA engine afterwards
 * (_DRM_LOCK_READY / _DRM_LOCK_QUIESCENT).  Returns 0 on success,
 * -EINVAL for bad contexts, -EINTR/-ERESTARTSYS on signals. */
int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_lock_t lock;
drm_queue_t *q;
#if DRM_DMA_HISTOGRAM
cycles_t start;
dev->lck_start = start = get_cycles();
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
if (lock.context < 0 || lock.context >= dev->queue_count) {
return -EINVAL;
}
q = dev->queuelist[lock.context];
ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
if (!ret) {
/* Fairness: if we held the lock within the last slice and
 * someone else holds it now, yield for the remainder. */
if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
!= lock.context) {
long j = jiffies - dev->lock.lock_time;
if (j > 0 && j <= DRM_LOCK_SLICE) {
/* Can't take lock if we just had it and
there is contention. */
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(j);
}
}
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
lock.context)) {
dev->lock.pid = current->pid;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->total_locks);
atomic_inc(&q->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
current->state = TASK_INTERRUPTIBLE;
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */
if (!ret) {
if (lock.flags & _DRM_LOCK_READY)
i810_dma_ready(dev);
if (lock.flags & _DRM_LOCK_QUIESCENT)
i810_dma_quiescent(dev);
}
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif
return ret;
}

583
linux-core/i810_drv.c Normal file
View File

@ -0,0 +1,583 @@
/* i810_drv.c -- I810 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.c,v 1.1 2000/02/11 17:26:05 dawes Exp $
*
*/
#define EXPORT_SYMTAB
#include "drmP.h"
#include "i810_drv.h"
/* Module identification.  These strings are reported to user space by
 * i810_version() via DRM_IOCTL_VERSION. */
EXPORT_SYMBOL(i810_init);
EXPORT_SYMBOL(i810_cleanup);
#define I810_NAME "i810"
/* Fixed: was "Matrox g200/g400", copied from the mga driver -- this is
 * the Intel i810 driver (cf. MODULE_DESCRIPTION below). */
#define I810_DESC "Intel i810"
#define I810_DATE "19991213"
#define I810_MAJOR 0
#define I810_MINOR 0
#define I810_PATCHLEVEL 1
/* The single (static) device instance and the reserved context. */
static drm_device_t i810_device;
drm_ctx_t i810_res_ctx;
/* File operations dispatched through the misc-device node; generic drm_*
 * handlers are used where no i810-specific behavior is needed. */
static struct file_operations i810_fops = {
open: i810_open,
flush: drm_flush,
release: i810_release,
ioctl: i810_ioctl,
mmap: drm_mmap,
read: drm_read,
fasync: drm_fasync,
};
/* Registered in i810_init(); the minor number is assigned dynamically at
 * registration time. */
static struct miscdevice i810_misc = {
minor: MISC_DYNAMIC_MINOR,
name: I810_NAME,
fops: &i810_fops,
};
/* Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd) in i810_ioctl().
 * The two flags after each handler gate access: one requires an
 * authenticated client, the other requires CAP_SYS_ADMIN (see the
 * auth_needed/root_only checks in i810_ioctl). */
static drm_ioctl_desc_t i810_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { i810_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { i810_control, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { i810_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { i810_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { i810_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { i810_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { i810_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { i810_dma, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { i810_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { i810_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
};
#define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls)
#ifdef MODULE
int init_module(void);
void cleanup_module(void);
static char *i810 = NULL;
MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("Intel I810");
MODULE_PARM(i810, "s");
/* init_module is called when insmod is used to load the module */
int init_module(void)
{
printk("doing i810_init()\n");
return i810_init();
}
/* cleanup_module is called when rmmod is used to unload the module */
void cleanup_module(void)
{
i810_cleanup();
}
#endif
#ifndef MODULE
/* i810_setup is called by the kernel to parse command-line options passed
 * via the boot-loader (e.g., LILO). It calls the insmod option routine,
 * drm_parse_drm.
 *
 * This is not currently supported, since it requires changes to
 * linux/init/main.c.
 *
 * NOTE(review): in a non-module build this definition collides with the
 * `static int i810_setup(drm_device_t *)` defined below -- one of the
 * two needs renaming before !MODULE builds can compile. */
void __init i810_setup(char *str, int *ints)
{
if (ints[0] != 0) {
DRM_ERROR("Illegal command line format, ignored\n");
return;
}
drm_parse_options(str);
}
#endif
/* Per-device soft initialization, run when the first opener arrives (see
 * i810_open): reset counters, clear magic/map/queue/lock state, and set
 * up the debug message ring buffer.  Always returns 0. */
static int i810_setup(drm_device_t *dev)
{
int i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
drm_dma_setup(dev);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
/* Empty the authentication-magic hash chains. */
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
init_timer(&dev->timer);
init_waitqueue_head(&dev->context_wait);
#if DRM_DMA_HISTO
memset(&dev->histo, 0, sizeof(dev->histo));
#endif
dev->ctx_start = 0;
dev->lck_start = 0;
/* Debug message ring buffer (read via /dev node). */
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
dev->buf_async = NULL;
init_waitqueue_head(&dev->buf_readers);
init_waitqueue_head(&dev->buf_writers);
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
/* Per-device teardown, run when the last opener leaves (see i810_release)
 * and from i810_init's error path: uninstall the IRQ, then free every
 * per-open resource (names, magic lists, AGP memory, vma list, maps,
 * queues, DMA state) under struct_sem.  Always returns 0. */
static int i810_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
if (dev->irq) i810_irq_uninstall(dev);
down(&dev->struct_sem);
del_timer(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
/* Clear AGP information */
if (dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
/* Remove AGP resources, but leave dev->agp
intact until i810_cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
if (entry->bound) drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
if (dev->agp->acquired && drm_agp.release)
(*drm_agp.release)();
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
case _DRM_AGP:
/* AGP memory was freed with the AGP list above. */
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
if (dev->queuelist) {
for (i = 0; i < dev->queue_count; i++) {
drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
if (dev->queuelist[i]) {
drm_free(dev->queuelist[i],
sizeof(*dev->queuelist[0]),
DRM_MEM_QUEUES);
dev->queuelist[i] = NULL;
}
}
drm_free(dev->queuelist,
dev->queue_slots * sizeof(*dev->queuelist),
DRM_MEM_QUEUES);
dev->queuelist = NULL;
}
drm_dma_takedown(dev);
dev->queue_count = 0;
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
/* Wake any sleepers in i810_lock(); they see hw_lock == NULL
 * and fail out. */
wake_up_interruptible(&dev->lock.lock_queue);
}
up(&dev->struct_sem);
return 0;
}
/* i810_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */
/* Registers the misc device, initializes the memory/proc/AGP/context
 * subsystems, and announces the driver.  Returns 0 or a negative errno;
 * the bare printk()s are load-time debug scaffolding. */
int i810_init(void)
{
int retcode;
drm_device_t *dev = &i810_device;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
dev->count_lock = SPIN_LOCK_UNLOCKED;
sema_init(&dev->struct_sem, 1);
#ifdef MODULE
drm_parse_options(i810);
#endif
printk("doing misc_register\n");
if ((retcode = misc_register(&i810_misc))) {
DRM_ERROR("Cannot register \"%s\"\n", I810_NAME);
return retcode;
}
dev->device = MKDEV(MISC_MAJOR, i810_misc.minor);
dev->name = I810_NAME;
printk("doing mem init\n");
drm_mem_init();
printk("doing proc init\n");
drm_proc_init(dev);
printk("doing agp init\n");
dev->agp = drm_agp_init();
printk("doing ctxbitmap init\n");
if((retcode = drm_ctxbitmap_init(dev))) {
/* Unwind everything registered above. */
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
drm_proc_cleanup();
misc_deregister(&i810_misc);
i810_takedown(dev);
return retcode;
}
#if 0
printk("doing i810_dma_init\n");
i810_dma_init(dev);
#endif
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
I810_NAME,
I810_MAJOR,
I810_MINOR,
I810_PATCHLEVEL,
I810_DATE,
i810_misc.minor);
return 0;
}
/* i810_cleanup is called via cleanup_module at module unload time. */
/* Reverses i810_init(): tear down proc entries, deregister the misc
 * device, release the context bitmap, DMA state, per-device resources,
 * and finally the AGP bookkeeping left intact by i810_takedown(). */
void i810_cleanup(void)
{
drm_device_t *dev = &i810_device;
DRM_DEBUG("\n");
drm_proc_cleanup();
if (misc_deregister(&i810_misc)) {
DRM_ERROR("Cannot unload module\n");
} else {
DRM_INFO("Module unloaded\n");
}
drm_ctxbitmap_cleanup(dev);
i810_dma_cleanup(dev);
i810_takedown(dev);
if (dev->agp) {
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
}
/* DRM_IOCTL_VERSION handler: report the driver version numbers and copy
 * the name/date/description strings into the caller-supplied buffers.
 * Returns 0 on success, -EFAULT on a failed user-space copy. */
int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_version_t version;
int len;
copy_from_user_ret(&version,
(drm_version_t *)arg,
sizeof(version),
-EFAULT);
/* Copy `value` into the user buffer described by the (name, name_len)
 * pair: clamp the copy to the user-supplied capacity, then report the
 * full string length back.  Wrapped in do { } while (0) so the macro
 * expands safely as a single statement (classic multi-statement macro
 * idiom; the unwrapped original would misbehave under an if/else). */
#define DRM_COPY(name,value) \
do { \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
copy_to_user_ret(name, value, len, -EFAULT); \
} \
} while (0)
version.version_major = I810_MAJOR;
version.version_minor = I810_MINOR;
version.version_patchlevel = I810_PATCHLEVEL;
DRM_COPY(version.name, I810_NAME);
DRM_COPY(version.date, I810_DATE);
DRM_COPY(version.desc, I810_DESC);
copy_to_user_ret((drm_version_t *)arg,
&version,
sizeof(version),
-EFAULT);
return 0;
}
/* Device open: attach per-file state via drm_open_helper(); the first
 * successful open of the device also runs i810_setup().  open_count is
 * guarded by count_lock.  Returns 0 or the helper's error. */
int i810_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = &i810_device;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_open_helper(inode, filp, dev))) {
MOD_INC_USE_COUNT;
atomic_inc(&dev->total_open);
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return i810_setup(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
/* Device close: generic drm_release() first; on the last close, refuse
 * teardown (-EBUSY) while ioctls are in flight or clients are blocked,
 * otherwise run i810_takedown().  open_count is guarded by count_lock. */
int i810_release(struct inode *inode, struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_release(inode, filp))) {
MOD_DEC_USE_COUNT;
atomic_inc(&dev->total_close);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
spin_unlock(&dev->count_lock);
return -EBUSY;
}
spin_unlock(&dev->count_lock);
return i810_takedown(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
/* i810_ioctl is called whenever a process performs an ioctl on /dev/drm.
 * It dispatches through i810_ioctls[] after checking the handler's
 * auth_needed/root_only flags, and tracks in-flight ioctls via
 * dev->ioctl_count (consulted by i810_release before teardown). */
int i810_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= I810_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &i810_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
/* DRM_IOCTL_UNLOCK handler: release the hardware lock.  The lock is first
 * handed to the kernel context so queued DMA can be scheduled before it is
 * finally freed. */
int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;

	copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

	/* User space may never release the kernel's own context. */
	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);

	/* Transfer the lock to the kernel context, run the DMA scheduler,
	   then drop the lock for real -- unless a context switch is in
	   flight (context_flag set). */
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	i810_dma_schedule(dev, 1);

	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

#if DRM_DMA_HISTOGRAM
	/* Record how many cycles the lock was held. */
	atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles()
						       - dev->lck_start)]);
#endif
	return 0;
}

76
linux-core/i810_drv.h Normal file
View File

@ -0,0 +1,76 @@
/* i810_drv.h -- Private header for the Intel i810 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.h,v 1.1 2000/02/11 17:26:05 dawes Exp $
*/
/* Declarations shared between the i810 DRM driver's translation units. */
#ifndef _I810_DRV_H_
#define _I810_DRV_H_

				/* i810_drv.c -- driver entry points */
extern int  i810_init(void);
extern void i810_cleanup(void);
extern int  i810_version(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int  i810_open(struct inode *inode, struct file *filp);
extern int  i810_release(struct inode *inode, struct file *filp);
extern int  i810_ioctl(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
extern int  i810_unlock(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);

				/* i810_dma.c -- DMA, IRQ and lock handling */
extern int  i810_dma_schedule(drm_device_t *dev, int locked);
extern int  i810_dma(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg);
extern int  i810_irq_install(drm_device_t *dev, int irq);
extern int  i810_irq_uninstall(drm_device_t *dev);
extern int  i810_control(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int  i810_lock(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg);
extern void i810_dma_init(drm_device_t *dev);
extern void i810_dma_cleanup(drm_device_t *dev);

				/* i810_bufs.c -- buffer management ioctls */
extern int  i810_addbufs(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int  i810_infobufs(struct inode *inode, struct file *filp,
			  unsigned int cmd, unsigned long arg);
extern int  i810_markbufs(struct inode *inode, struct file *filp,
			  unsigned int cmd, unsigned long arg);
extern int  i810_freebufs(struct inode *inode, struct file *filp,
			  unsigned int cmd, unsigned long arg);
extern int  i810_mapbufs(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int  i810_addmap(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);

#endif

576
linux-core/mga_drv.c Normal file
View File

@ -0,0 +1,576 @@
/* mga_drv.c -- Matrox g200/g400 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.c,v 1.1 2000/02/11 17:26:07 dawes Exp $
*
*/
#define EXPORT_SYMTAB
#include "drmP.h"
#include "mga_drv.h"

/* Only the module entry points are exported. */
EXPORT_SYMBOL(mga_init);
EXPORT_SYMBOL(mga_cleanup);

/* Driver identity reported by the DRM_IOCTL_VERSION ioctl (mga_version). */
#define MGA_NAME	"mga"
#define MGA_DESC	"Matrox g200/g400"
#define MGA_DATE	"19991213"
#define MGA_MAJOR	0
#define MGA_MINOR	0
#define MGA_PATCHLEVEL	1

/* Single static per-driver device record. */
static drm_device_t	mga_device;
drm_ctx_t		mga_res_ctx;

/* VFS entry points for the misc-device node (GNU designated-initializer
   syntax). */
static struct file_operations mga_fops = {
	open:	 mga_open,
	flush:	 drm_flush,
	release: mga_release,
	ioctl:	 mga_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
};

/* Registered with the misc-device subsystem; minor is assigned
   dynamically in mga_init(). */
static struct miscdevice mga_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  MGA_NAME,
	fops:  &mga_fops,
};
static drm_ioctl_desc_t mga_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mga_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mga_control, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mga_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mga_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mga_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mga_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mga_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_clear_bufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_swap_bufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 1 },
};
#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
#ifdef MODULE
int  init_module(void);
void cleanup_module(void);

/* Module option string, handed to drm_parse_options() in mga_init(). */
static char *mga = NULL;

MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("Matrox g200/g400");
MODULE_PARM(mga, "s");

/* init_module is called when insmod is used to load the module */
int init_module(void)
{
	DRM_DEBUG("doing mga_init()\n");
	return mga_init();
}

/* cleanup_module is called when rmmod is used to unload the module */
void cleanup_module(void)
{
	mga_cleanup();
}
#endif
#ifndef MODULE
/* mga_setup is called by the kernel to parse command-line options passed
 * via the boot-loader (e.g., LILO).  It calls the insmod option routine,
 * drm_parse_drm.
 *
 * This is not currently supported, since it requires changes to
 * linux/init/main.c.
 *
 * NOTE(review): a second, static function also named mga_setup (taking a
 * drm_device_t *) is defined later in this file.  In a non-MODULE build
 * both definitions are in scope -- confirm this compiles, or rename one
 * of them. */
void __init mga_setup(char *str, int *ints)
{
	if (ints[0] != 0) {
		DRM_ERROR("Illegal command line format, ignored\n");
		return;
	}
	drm_parse_options(str);
}
#endif
/* Reset all per-device bookkeeping to a pristine state.  Called on the
 * first open of the device node (see mga_open); the inverse operation is
 * mga_takedown.  Always returns 0. */
static int mga_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);

	drm_dma_setup(dev);

	/* Usage counters. */
	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	/* Authentication-magic hash buckets. */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	/* Message ring buffer of size DRM_BSZ -- presumably backs
	   drm_read() (registered in mga_fops); confirm against drmP.h. */
	dev->buf_rp	    = dev->buf;
	dev->buf_wp	    = dev->buf;
	dev->buf_end	    = dev->buf + DRM_BSZ;
	dev->buf_async	    = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */
	return 0;
}
/* Undo mga_setup(): release the IRQ, then (under struct_sem) free all
 * per-device resources -- device name, magic hash, AGP allocations, vma
 * list, register/framebuffer/SHM maps, DMA queues -- and drop the
 * hardware lock.  Called on last close (mga_release), from mga_cleanup,
 * and on the mga_init error path.  Always returns 0. */
static int mga_takedown(drm_device_t *dev)
{
	int i;
	drm_magic_entry_t *pt, *next;
	drm_map_t *map;
	drm_vma_entry_t *vma, *vma_next;

	DRM_DEBUG("\n");

	if (dev->irq) mga_irq_uninstall(dev);

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp
		   intact until cleanup is called. */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound) drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired && drm_agp.release)
			(*drm_agp.release)();

		dev->agp->acquired = 0;
		dev->agp->enabled  = 0;
	}

	/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

	/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			case _DRM_AGP:
				/* Nothing extra here; the backing AGP
				   memory was freed above. */
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	if (dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {
			/* NOTE(review): queuelist[i] is dereferenced here
			   but only NULL-checked just below -- confirm a
			   slot can never be NULL, or hoist the check. */
			drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
			if (dev->queuelist[i]) {
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist = NULL;
	}

	drm_dma_takedown(dev);

	dev->queue_count = 0;
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.pid	  = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}
/* mga_init is called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported). */
int mga_init(void)
{
int retcode;
drm_device_t *dev = &mga_device;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
dev->count_lock = SPIN_LOCK_UNLOCKED;
sema_init(&dev->struct_sem, 1);
#ifdef MODULE
drm_parse_options(mga);
#endif
DRM_DEBUG("doing misc_register\n");
if ((retcode = misc_register(&mga_misc))) {
DRM_ERROR("Cannot register \"%s\"\n", MGA_NAME);
return retcode;
}
dev->device = MKDEV(MISC_MAJOR, mga_misc.minor);
dev->name = MGA_NAME;
DRM_DEBUG("doing mem init\n");
drm_mem_init();
DRM_DEBUG("doing proc init\n");
drm_proc_init(dev);
DRM_DEBUG("doing agp init\n");
dev->agp = drm_agp_init();
DRM_DEBUG("doing ctxbitmap init\n");
if((retcode = drm_ctxbitmap_init(dev))) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
drm_proc_cleanup();
misc_deregister(&mga_misc);
mga_takedown(dev);
return retcode;
}
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
MGA_NAME,
MGA_MAJOR,
MGA_MINOR,
MGA_PATCHLEVEL,
MGA_DATE,
mga_misc.minor);
return 0;
}
/* mga_cleanup is called via cleanup_module at module unload time: remove
 * the /proc entries, deregister the misc device, release the context
 * bitmap, DMA state and all other per-device resources, then free the
 * AGP head left in place by mga_takedown(). */
void mga_cleanup(void)
{
	drm_device_t *dev = &mga_device;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	if (misc_deregister(&mga_misc))
		DRM_ERROR("Cannot unload module\n");
	else
		DRM_INFO("Module unloaded\n");

	drm_ctxbitmap_cleanup(dev);
	mga_dma_cleanup(dev);
	mga_takedown(dev);

	if (dev->agp) {
		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}
}
/* DRM_IOCTL_VERSION handler: copy the driver version numbers and the
 * name/date/description strings back to user space, truncating each
 * string to the caller-supplied buffer length. */
int mga_version(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_version_t version;
	int	      len;

	copy_from_user_ret(&version,
			   (drm_version_t *)arg,
			   sizeof(version),
			   -EFAULT);

	/* Copy `value` into the user buffer `name`, truncated to the
	   caller-supplied name##_len; name##_len is then set to the full
	   source length -- presumably so the caller can detect truncation
	   and retry with a larger buffer (confirm against libdrm).
	   NOTE: multi-statement macro without do/while(0) -- only safe
	   because every use below is a plain statement. */
#define DRM_COPY(name,value)				     \
	len = strlen(value);				     \
	if (len > name##_len) len = name##_len;		     \
	name##_len  = strlen(value);			     \
	if (len && name) {				     \
		copy_to_user_ret(name, value, len, -EFAULT); \
	}

	version.version_major	   = MGA_MAJOR;
	version.version_minor	   = MGA_MINOR;
	version.version_patchlevel = MGA_PATCHLEVEL;

	DRM_COPY(version.name, MGA_NAME);
	DRM_COPY(version.date, MGA_DATE);
	DRM_COPY(version.desc, MGA_DESC);

	copy_to_user_ret((drm_version_t *)arg,
			 &version,
			 sizeof(version),
			 -EFAULT);
	return 0;
}
/* File-open handler for the MGA DRM device node.  On the first
 * successful open the per-device state is (re)initialized via
 * mga_setup(), outside the count lock. */
int mga_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev = &mga_device;
	int ret;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	ret = drm_open_helper(inode, filp, dev);
	if (ret)
		return ret;

	MOD_INC_USE_COUNT;
	atomic_inc(&dev->total_open);
	spin_lock(&dev->count_lock);
	dev->open_count++;
	if (dev->open_count == 1) {
		/* First opener sets the device up. */
		spin_unlock(&dev->count_lock);
		return mga_setup(dev);
	}
	spin_unlock(&dev->count_lock);
	return 0;
}
/* File-release handler for the MGA DRM device node.  On last close the
 * device is torn down via mga_takedown(), unless an ioctl is still in
 * flight or the device is blocked. */
int mga_release(struct inode *inode, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	int ret;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	ret = drm_release(inode, filp);
	if (ret)
		return ret;

	MOD_DEC_USE_COUNT;
	atomic_inc(&dev->total_close);
	spin_lock(&dev->count_lock);
	--dev->open_count;
	if (dev->open_count == 0) {
		if (atomic_read(&dev->ioctl_count) || dev->blocked) {
			/* Refuse takedown while the device is in use. */
			DRM_ERROR("Device busy: %d %d\n",
				  atomic_read(&dev->ioctl_count),
				  dev->blocked);
			spin_unlock(&dev->count_lock);
			return -EBUSY;
		}
		spin_unlock(&dev->count_lock);
		return mga_takedown(dev);
	}
	spin_unlock(&dev->count_lock);
	return ret;
}
/* mga_ioctl is called whenever a process performs an ioctl on the MGA
   /dev/drm device node. */
int mga_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= MGA_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &mga_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
/* DRM_IOCTL_UNLOCK handler: release the hardware lock.  The lock is first
 * handed to the kernel context so queued DMA can be scheduled before it
 * is finally freed. */
int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;

	copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

	/* User space may never release the kernel's own context. */
	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);

	/* Transfer the lock to the kernel context, run the DMA scheduler,
	   then drop the lock for real. */
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	mga_dma_schedule(dev, 1);

	if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
			  DRM_KERNEL_CONTEXT)) {
		DRM_ERROR("\n");
	}
	return 0;
}

View File

@ -9,7 +9,7 @@
# Note 2! The CFLAGS definitions are now inherited from the
# parent makes..
#
# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.2 1999/09/27 14:59:24 dawes Exp $
# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.5 2000/02/14 06:27:25 martin Exp $
L_TARGET := libdrm.a
@ -22,7 +22,14 @@ ifdef CONFIG_DRM_GAMMA
M_OBJS += gamma.o
endif
ifdef CONFIG_DRM_TDFX
M_OBJS += tdfx.o
endif
include $(TOPDIR)/Rules.make
gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET)
$(LD) $(LD_RFLAG) -r -o $@ gamma_drv.o gamma_dma.o -L. -ldrm
tdfx.o: tdfx_drv.o tdfx_context.o $(L_TARGET)
$(LD) $(LD_RFLAG) -r -o $@ tdfx_drv.o tdfx_context.o -L. -ldrm

313
linux/agpsupport.c Normal file
View File

@ -0,0 +1,313 @@
/* agpsupport.c -- DRM support for AGP/GART backend -*- linux-c -*-
* Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/agpsupport.c,v 1.1 2000/02/11 17:26:02 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"

/* Entry points into the (optional) agpgart module, resolved at runtime
   by drm_agp_init(); every slot is NULL until resolution succeeds, and
   callers test the slot before use. */
drm_agp_func_t drm_agp = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };

/* The C standard says that 'void *' is not guaranteed to hold a function
   pointer, so we use this union to define a generic pointer that is
   guaranteed to hold any of the function pointers we care about. */
typedef union {
	void	      (*free_memory)(agp_memory *);
	agp_memory   *(*allocate_memory)(size_t, u32);
	int	      (*bind_memory)(agp_memory *, off_t);
	int	      (*unbind_memory)(agp_memory *);
	void	      (*enable)(u32);
	int	      (*acquire)(void);
	void	      (*release)(void);
	void	      (*copy_info)(agp_kern_info *);
	unsigned long address;	/* integer view, used to test for NULL */
} drm_agp_func_u;

/* Maps an agpgart symbol name to the drm_agp slot that receives it. */
typedef struct drm_agp_fill {
	const char     *name;
	drm_agp_func_u *f;
} drm_agp_fill_t;

static drm_agp_fill_t drm_agp_fill[] = {
	{ __MODULE_STRING(agp_free_memory),
	  (drm_agp_func_u *)&drm_agp.free_memory },
	{ __MODULE_STRING(agp_allocate_memory),
	  (drm_agp_func_u *)&drm_agp.allocate_memory },
	{ __MODULE_STRING(agp_bind_memory),
	  (drm_agp_func_u *)&drm_agp.bind_memory },
	{ __MODULE_STRING(agp_unbind_memory),
	  (drm_agp_func_u *)&drm_agp.unbind_memory },
	{ __MODULE_STRING(agp_enable),
	  (drm_agp_func_u *)&drm_agp.enable },
	{ __MODULE_STRING(agp_backend_acquire),
	  (drm_agp_func_u *)&drm_agp.acquire },
	{ __MODULE_STRING(agp_backend_release),
	  (drm_agp_func_u *)&drm_agp.release },
	{ __MODULE_STRING(agp_copy_info),
	  (drm_agp_func_u *)&drm_agp.copy_info },
	{ NULL, NULL }	/* terminator */
};
/* DRM_IOCTL_AGP_INFO: report the AGP backend's version, mode, aperture
 * and memory statistics to user space.  Fails with -EINVAL if AGP has
 * not been acquired or agpgart is unavailable. */
int drm_agp_info(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	agp_kern_info *ki;
	drm_agp_info_t info;

	if (!dev->agp->acquired || !drm_agp.copy_info)
		return -EINVAL;

	ki = &dev->agp->agp_info;

	info.agp_version_major = ki->version.major;
	info.agp_version_minor = ki->version.minor;
	info.mode	       = ki->mode;
	info.aperture_base     = ki->aper_base;
	info.aperture_size     = ki->aper_size * 1024 * 1024;
	info.memory_allowed    = ki->max_memory << PAGE_SHIFT;
	info.memory_used       = ki->current_memory << PAGE_SHIFT;
	info.id_vendor	       = ki->device->vendor;
	info.id_device	       = ki->device->device;

	copy_to_user_ret((drm_agp_info_t *)arg, &info, sizeof(info), -EFAULT);
	return 0;
}
/* DRM_IOCTL_AGP_ACQUIRE: take ownership of the AGP backend for this
 * device.  -EINVAL if already acquired or agpgart is unavailable. */
int drm_agp_acquire(struct inode *inode, struct file *filp, unsigned int cmd,
		    unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	int rc;

	if (dev->agp->acquired || !drm_agp.acquire)
		return -EINVAL;
	rc = (*drm_agp.acquire)();
	if (rc)
		return rc;
	dev->agp->acquired = 1;
	return 0;
}
/* DRM_IOCTL_AGP_RELEASE: give the AGP backend back.  -EINVAL if it was
 * never acquired or agpgart is unavailable. */
int drm_agp_release(struct inode *inode, struct file *filp, unsigned int cmd,
		    unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;

	if (!dev->agp->acquired || !drm_agp.release)
		return -EINVAL;

	(*drm_agp.release)();
	dev->agp->acquired = 0;
	return 0;
}
/* DRM_IOCTL_AGP_ENABLE: enable the AGP backend with the mode requested
 * by user space and record the aperture base. */
int drm_agp_enable(struct inode *inode, struct file *filp, unsigned int cmd,
		   unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_agp_mode_t request;

	if (!dev->agp->acquired || !drm_agp.enable)
		return -EINVAL;
	copy_from_user_ret(&request, (drm_agp_mode_t *)arg, sizeof(request),
			   -EFAULT);

	dev->agp->mode = request.mode;
	(*drm_agp.enable)(request.mode);
	dev->agp->base	  = dev->agp->agp_info.aper_base;
	dev->agp->enabled = 1;
	return 0;
}
/* DRM_IOCTL_AGP_ALLOC: allocate AGP memory, link it onto the head of the
 * device's allocation list, and return its handle (and physical address)
 * to user space.
 *
 * Fix: on copy_to_user failure the original unconditionally wrote
 * dev->agp->memory->prev after unlinking, dereferencing NULL whenever
 * the failed entry was the only element of the list. */
int drm_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_agp_buffer_t request;
	drm_agp_mem_t *entry;
	agp_memory *memory;
	unsigned long pages;
	u32 type;

	if (!dev->agp->acquired) return -EINVAL;
	copy_from_user_ret(&request, (drm_agp_buffer_t *)arg, sizeof(request),
			   -EFAULT);
	if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
		return -ENOMEM;

	pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
	type  = (u32) request.type;

	if (!(memory = drm_alloc_agp(pages, type))) {
		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		return -ENOMEM;
	}

	/* Push the new allocation onto the head of the list. */
	entry->handle = (unsigned long)memory->memory;
	entry->memory = memory;
	entry->bound  = 0;
	entry->pages  = pages;
	entry->prev   = NULL;
	entry->next   = dev->agp->memory;
	if (dev->agp->memory) dev->agp->memory->prev = entry;
	dev->agp->memory = entry;

	request.handle	 = entry->handle;
	request.physical = memory->physical;

	if (copy_to_user((drm_agp_buffer_t *)arg, &request, sizeof(request))) {
		/* Undo: entry is the list head; unlink it, guarding the
		   case where it was the only element. */
		dev->agp->memory = entry->next;
		if (dev->agp->memory)
			dev->agp->memory->prev = NULL;
		drm_free_agp(memory, pages);
		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		return -EFAULT;
	}
	return 0;
}
/* Walk the device's AGP allocation list and return the entry whose
 * handle matches, or NULL if no such allocation exists. */
static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t *dev,
					   unsigned long handle)
{
	drm_agp_mem_t *m = dev->agp->memory;

	while (m && m->handle != handle)
		m = m->next;
	return m;
}
/* DRM_IOCTL_AGP_UNBIND: unbind a previously bound AGP allocation from
 * the aperture.
 *
 * Fix: clear entry->bound when the unbind succeeds.  The original left
 * the stale bound address in place, so a subsequent drm_agp_free() would
 * attempt to unbind the same memory a second time.  (Assumes
 * drm_unbind_agp() returns 0 on success, matching how drm_agp_bind()
 * treats drm_bind_agp().) */
int drm_agp_unbind(struct inode *inode, struct file *filp, unsigned int cmd,
		   unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_agp_binding_t request;
	drm_agp_mem_t *entry;
	int retcode;

	if (!dev->agp->acquired) return -EINVAL;
	copy_from_user_ret(&request, (drm_agp_binding_t *)arg, sizeof(request),
			   -EFAULT);
	if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
		return -EINVAL;
	if (!entry->bound) return -EINVAL;

	retcode = drm_unbind_agp(entry->memory);
	if (!retcode)
		entry->bound = 0;
	return retcode;
}
/* DRM_IOCTL_AGP_BIND: bind an allocated-but-unbound AGP allocation at
 * the page offset requested by user space, recording the resulting bus
 * address in entry->bound. */
int drm_agp_bind(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_agp_binding_t request;
	drm_agp_mem_t *entry;
	int page;
	int rc;

	if (!dev->agp->acquired || !drm_agp.bind_memory)
		return -EINVAL;
	copy_from_user_ret(&request, (drm_agp_binding_t *)arg, sizeof(request),
			   -EFAULT);

	entry = drm_agp_lookup_entry(dev, request.handle);
	if (!entry || entry->bound)
		return -EINVAL;

	page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
	rc = drm_bind_agp(entry->memory, page);
	if (rc)
		return rc;

	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
	return 0;
}
/* DRM_IOCTL_AGP_FREE: unbind (if still bound), unlink and free an AGP
 * allocation identified by its handle.
 *
 * Fix: guard the list unlink.	The original wrote entry->prev->next and
 * entry->next->prev unconditionally, dereferencing NULL for the head or
 * tail of the list, and never updated dev->agp->memory when the head
 * was freed. */
int drm_agp_free(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_agp_buffer_t request;
	drm_agp_mem_t *entry;

	if (!dev->agp->acquired) return -EINVAL;
	copy_from_user_ret(&request, (drm_agp_buffer_t *)arg, sizeof(request),
			   -EFAULT);
	if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
		return -EINVAL;
	if (entry->bound) drm_unbind_agp(entry->memory);

	/* Unlink, handling head and tail positions. */
	if (entry->prev)
		entry->prev->next = entry->next;
	else
		dev->agp->memory = entry->next;
	if (entry->next)
		entry->next->prev = entry->prev;

	drm_free_agp(entry->memory, entry->pages);
	drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
	return 0;
}
/* Resolve the agpgart entry points at runtime and, when all of them are
 * present, allocate and fill the per-device AGP bookkeeping head.
 * Returns NULL when agpgart is unavailable or allocation fails.
 *
 * Fixes: removed the duplicated get_module_symbol() call (a second copy
 * sat under #if 0); gave the empty `default:` label a statement (a label
 * must precede a statement in C, and head->chipset would otherwise stay
 * NULL and be printed via %s); demoted the unconditional printk() probe
 * messages to DRM_DEBUG. */
drm_agp_head_t *drm_agp_init(void)
{
	drm_agp_fill_t *fill;
	drm_agp_head_t *head = NULL;
	int agp_available = 1;

	/* Resolve every agpgart symbol; any miss disables AGP. */
	for (fill = &drm_agp_fill[0]; fill->name; fill++) {
		char *n = (char *)fill->name;
		*fill->f = (drm_agp_func_u)get_module_symbol(NULL, n);
		DRM_DEBUG("%s resolves to 0x%08lx\n", n, (*fill->f).address);
		if (!(*fill->f).address) agp_available = 0;
	}
	DRM_DEBUG("agp_available = %d\n", agp_available);

	if (agp_available) {
		if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
			return NULL;
		memset((void *)head, 0, sizeof(*head));
		(*drm_agp.copy_info)(&head->agp_info);
		head->memory = NULL;
		switch (head->agp_info.chipset) {
		case INTEL_GENERIC:  head->chipset = "Intel"; break;
		case INTEL_LX:	     head->chipset = "Intel 440LX"; break;
		case INTEL_BX:	     head->chipset = "Intel 440BX"; break;
		case INTEL_GX:	     head->chipset = "Intel 440GX"; break;
		case INTEL_I810:     head->chipset = "Intel i810"; break;
		case VIA_GENERIC:    head->chipset = "VIA"; break;
		case VIA_VP3:	     head->chipset = "VIA VP3"; break;
		case VIA_MVP3:	     head->chipset = "VIA MVP3"; break;
		case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break;
		case SIS_GENERIC:    head->chipset = "SiS"; break;
		case AMD_GENERIC:    head->chipset = "AMD"; break;
		case AMD_IRONGATE:   head->chipset = "AMD Irongate"; break;
		case ALI_GENERIC:    head->chipset = "ALi"; break;
		case ALI_M1541:	     head->chipset = "ALi M1541"; break;
		default:	     head->chipset = "Unknown"; break;
		}
		DRM_INFO("AGP %d.%d on %s @ 0x%08lx %dMB\n",
			 head->agp_info.version.major,
			 head->agp_info.version.minor,
			 head->chipset,
			 head->agp_info.aper_base,
			 head->agp_info.aper_size);
	}
	return head;
}

View File

@ -1,6 +1,6 @@
/* bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
* Revised: Fri Dec 3 12:11:11 1999 by faith@precisioninsight.com
* Revised: Mon Feb 14 00:14:11 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.8 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.2 1999/12/14 01:33:55 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/bufs.c,v 1.4 2000/02/14 06:27:25 martin Exp $
*
*/

86
linux/ctxbitmap.c Normal file
View File

@ -0,0 +1,86 @@
/* ctxbitmap.c -- Context bitmap management -*- linux-c -*-
* Created: Thu Jan 6 03:56:42 2000 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/ctxbitmap.c,v 1.1 2000/02/11 17:26:02 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
{
	/* Return a context handle to the device's context bitmap.
	 * Out-of-range handles are logged and ignored. */
	if (ctx_handle >= 0 && ctx_handle < DRM_MAX_CTXBITMAP) {
		clear_bit(ctx_handle, dev->ctx_bitmap);
		return;
	}
	DRM_ERROR("Attempt to free invalid context handle: %d\n",
		  ctx_handle);
}
int drm_ctxbitmap_next(drm_device_t *dev)
{
	/* Allocate the lowest free context handle, or -1 if the
	 * bitmap is exhausted. */
	int idx = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);

	if (idx >= DRM_MAX_CTXBITMAP)
		return -1;
	set_bit(idx, dev->ctx_bitmap);
	printk("drm_ctxbitmap_next bit : %d\n", idx);
	return idx;
}
int drm_ctxbitmap_init(drm_device_t *dev)
{
	/* Allocate and zero the context bitmap (4 pages), then claim
	 * the reserved kernel contexts up front.  Returns 0 or -ENOMEM. */
	int i;
	int temp;

	dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE * 4,
						     DRM_MEM_CTXBITMAP);
	if (!dev->ctx_bitmap)
		return -ENOMEM;

	memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE * 4);
	for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
		temp = drm_ctxbitmap_next(dev);
		printk("drm_ctxbitmap_init : %d\n", temp);
	}
	return 0;
}
void drm_ctxbitmap_cleanup(drm_device_t *dev)
{
	/* Release the storage allocated by drm_ctxbitmap_init(). */
	drm_free((void *)dev->ctx_bitmap, PAGE_SIZE * 4, DRM_MEM_CTXBITMAP);
}

View File

@ -1,6 +1,6 @@
/* dma.c -- DMA IOCTL and function support -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
* Revised: Thu Sep 16 12:55:39 1999 by faith@precisioninsight.com
* Revised: Sun Feb 13 23:19:45 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.7 1999/09/16 16:56:18 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.2 1999/12/14 01:33:55 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.4 2000/02/14 06:27:26 martin Exp $
*
*/

View File

@ -1,6 +1,6 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Mon Dec 6 17:11:19 1999 by faith@precisioninsight.com
* Revised: Mon Feb 14 00:15:23 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.46 1999/08/20 20:00:53 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.2 1999/12/14 01:33:56 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.4 2000/02/14 06:27:26 martin Exp $
*
* Acknowledgements:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.

View File

@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.17 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.2 1999/12/14 01:33:57 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/gamma_drv.c,v 1.3 2000/01/20 07:25:35 martin Exp $
*
*/
@ -52,6 +52,7 @@ static struct file_operations gamma_fops = {
mmap: drm_mmap,
read: drm_read,
fasync: drm_fasync,
poll: drm_poll,
};
static struct miscdevice gamma_misc = {

584
linux/i810_bufs.c Normal file
View File

@ -0,0 +1,584 @@
/* i810_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
* Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_bufs.c,v 1.1 2000/02/11 17:26:04 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "linux/un.h"
int i810_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
count = request.count;
order = drm_order(request.size);
size = 1 << order;
agp_offset = request.agp_start;
alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
if (dev->queue_count) return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
down(&dev->struct_sem);
entry = &dma->bufs[order];
if (entry->buf_count) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
while(entry->buf_count < count) {
for(offset = 0; offset + size <= total && entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = agp_offset - dev->agp->base + offset;/* ?? */
buf->bus_address = agp_offset + offset;
buf->address = agp_offset + offset + dev->agp->base;
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
buf->pid = 0;
#if DRM_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
up(&dev->struct_sem);
request.count = entry->buf_count;
request.size = size;
copy_to_user_ret((drm_buf_desc_t *)arg,
&request,
sizeof(request),
-EFAULT);
atomic_dec(&dev->buf_alloc);
return 0;
}
/* Create DMA buffers backed by freshly allocated PCI (system) pages.
 *
 * Allocates request.count buffers of (1 << drm_order(request.size))
 * bytes, grabbing pages with drm_alloc_pages and recording each page
 * in dma->pagelist.  May only be called once per buffer order.
 * Returns 0 on success, negative errno on failure; the granted
 * count/size are copied back to user space.
 */
int i810_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
		     unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;

	if (!dma) return -EINVAL;

	copy_from_user_ret(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request),
			   -EFAULT);

	count = request.count;
	order = drm_order(request.size);
	size = 1 << order;

	DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
		  request.count, request.size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
	if (dev->queue_count) return -EBUSY; /* Not while in use */

	alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	/* Refuse new allocations once buffers are in use (mapped). */
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		/* Roll back the buflist allocation above. */
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist),
			 DRM_MEM_BUFS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* NOTE(review): drm_realloc result is not checked for NULL. */
	dma->pagelist = drm_realloc(dma->pagelist,
				    dma->page_count * sizeof(*dma->pagelist),
				    (dma->page_count + (count << page_order))
				    * sizeof(*dma->pagelist),
				    DRM_MEM_PAGES);
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;
	while (entry->buf_count < count) {
		/* A failed page allocation ends the loop early with
		   however many buffers were created so far. */
		if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
		entry->seglist[entry->seg_count++] = page;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  page + PAGE_SIZE * i);
			dma->pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		/* Slice the segment into aligned buffers. */
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->pid = 0;
#if DRM_DMA_HISTOGRAM
			buf->time_queued = 0;
			buf->time_dispatched = 0;
			buf->time_completed = 0;
			buf->time_freed = 0;
#endif
			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	dma->buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist),
				   DRM_MEM_BUFS);
	for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
		dma->buflist[i] = &entry->buflist[i - dma->buf_count];

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	drm_freelist_create(&entry->freelist, entry->buf_count);
	for (i = 0; i < entry->buf_count; i++) {
		drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
	}

	up(&dev->struct_sem);

	request.count = entry->buf_count;
	request.size = size;

	copy_to_user_ret((drm_buf_desc_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);

	atomic_dec(&dev->buf_alloc);
	return 0;
}
int i810_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	/* Peek at the request flags and dispatch to the AGP or PCI
	 * allocation path.  The callee re-reads the full request. */
	drm_buf_desc_t desc;

	copy_from_user_ret(&desc,
			   (drm_buf_desc_t *)arg,
			   sizeof(desc),
			   -EFAULT);
	if (desc.flags & _DRM_AGP_BUFFER)
		return i810_addbufs_agp(inode, filp, cmd, arg);
	return i810_addbufs_pci(inode, filp, cmd, arg);
}
/* Report, per buffer order, the count/size/freelist marks of all
 * allocated buffers.  If the user's list is too small only the total
 * count is returned.  Also flips the device into "buffers in use"
 * mode, which blocks further addbufs calls.
 */
int i810_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	int i;
	int count;

	if (!dma) return -EINVAL;

	/* Cannot report while an allocation is in flight. */
	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	copy_from_user_ret(&request,
			   (drm_buf_info_t *)arg,
			   sizeof(request),
			   -EFAULT);

	/* First pass: count the populated orders. */
	for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
		if (dma->bufs[i].buf_count) ++count;
	}

	DRM_DEBUG("count = %d\n", count);

	/* Second pass: copy one drm_buf_desc per populated order,
	   field by field, straight to user space. */
	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
			if (dma->bufs[i].buf_count) {
				copy_to_user_ret(&request.list[count].count,
						 &dma->bufs[i].buf_count,
						 sizeof(dma->bufs[0]
							.buf_count),
						 -EFAULT);
				copy_to_user_ret(&request.list[count].size,
						 &dma->bufs[i].buf_size,
						 sizeof(dma->bufs[0].buf_size),
						 -EFAULT);
				copy_to_user_ret(&request.list[count].low_mark,
						 &dma->bufs[i]
						 .freelist.low_mark,
						 sizeof(dma->bufs[0]
							.freelist.low_mark),
						 -EFAULT);
				copy_to_user_ret(&request.list[count]
						 .high_mark,
						 &dma->bufs[i]
						 .freelist.high_mark,
						 sizeof(dma->bufs[0]
							.freelist.high_mark),
						 -EFAULT);
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	copy_to_user_ret((drm_buf_info_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);

	return 0;
}
int i810_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	/* Set the freelist low/high water marks for one buffer size
	 * class (selected by drm_order of the requested size). */
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	int order;

	if (!dma) return -EINVAL;

	copy_from_user_ret(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request),
			   -EFAULT);

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);

	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	/* Both marks must lie within [0, buf_count]. */
	if (request.low_mark < 0 || request.low_mark > entry->buf_count
	    || request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;
	return 0;
}
int i810_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	/* Return a user-supplied list of buffer indices to the free
	 * pool.  Each index is validated and must be owned by the
	 * calling process. */
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	drm_buf_t *buf;
	int idx;
	int i;

	if (!dma) return -EINVAL;

	copy_from_user_ret(&request,
			   (drm_buf_free_t *)arg,
			   sizeof(request),
			   -EFAULT);

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		/* Indices are fetched one at a time from user space. */
		copy_from_user_ret(&idx,
				   &request.list[i],
				   sizeof(idx),
				   -EFAULT);
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d freeing buffer owned by %d\n",
				  current->pid, buf->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}
	return 0;
}
/* Map all DMA buffers into the caller's address space in one mmap and
 * fill the user's list with each buffer's index/size/address.  Locks
 * out further buffer allocation (buf_use).  Returns 0 or negative
 * errno; the granted count is always copied back.
 */
int i810_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!dma) return -EINVAL;

	DRM_DEBUG("\n");

	/* Cannot map while an allocation is in flight. */
	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	copy_from_user_ret(&request,
			   (drm_buf_map_t *)arg,
			   sizeof(request),
			   -EFAULT);

	if (request.count >= dma->buf_count) {
		if(dma->flags & _DRM_DMA_USE_AGP) {
			/* This is an ugly vicious hack */
			/* AGP buffers live in the aperture map, so mmap
			   the first _DRM_AGP map rather than the DMA
			   byte range. */
			drm_map_t *map = NULL;
			for(i = 0; i < dev->map_count; i++) {
				map = dev->maplist[i];
				if(map->type == _DRM_AGP) break;
			}
			if (i >= dev->map_count || !map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = do_mmap(filp, 0, map->size, PROT_READ|PROT_WRITE,
					  MAP_SHARED, (unsigned long)map->handle);
		}
		else {
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		}
		/* do_mmap returns -errno in the top page of the address
		   space; anything above -1024UL is an error code. */
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void *)virtual;

		/* Describe every buffer at its mapped address. */
		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero,
					 sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;
			if (copy_to_user(&request.list[i].address,
					 &address,
					 sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	copy_to_user_ret((drm_buf_map_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);

	return retcode;
}

762
linux/i810_dma.c Normal file
View File

@ -0,0 +1,762 @@
/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_dma.c,v 1.1 2000/02/11 17:26:04 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#define I810_REG(reg) 2
#define I810_BASE(reg) ((unsigned long) \
dev->maplist[I810_REG(reg)]->handle)
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)
#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
/* Driver hook: DMA engine initialization.  Stub — only logs; the real
 * i810 setup is not implemented yet. */
void i810_dma_init(drm_device_t *dev)
{
	printk(KERN_INFO "i810_dma_init\n");
}
/* Driver hook: DMA engine teardown.  Stub — only logs. */
void i810_dma_cleanup(drm_device_t *dev)
{
	printk(KERN_INFO "i810_dma_cleanup\n");
}
/* Hand one buffer (bus address + byte length) to the hardware.
 * Stub — only logs; no commands are issued to the chip yet. */
static inline void i810_dma_dispatch(drm_device_t *dev, unsigned long address,
				     unsigned long length)
{
	printk(KERN_INFO "i810_dma_dispatch\n");
}
/* Wait for the DMA engine to go idle.  Stub — no-op for now. */
static inline void i810_dma_quiescent(drm_device_t *dev)
{
}
/* Prepare the engine for a new buffer by forcing quiescence
 * (itself currently a stub). */
static inline void i810_dma_ready(drm_device_t *dev)
{
	i810_dma_quiescent(dev);
	printk(KERN_INFO "i810_dma_ready\n");
}
/* Poll whether the engine can accept another buffer.  Stub — forces
 * quiescence, then always reports ready (1). */
static inline int i810_dma_is_ready(drm_device_t *dev)
{
	i810_dma_quiescent(dev);
	printk(KERN_INFO "i810_dma_is_ready\n");
	return 1;
}
/* Interrupt handler: retire the buffer that just completed and kick
 * the scheduler (via the immediate task queue, outside IRQ context)
 * to send the next one.  dma_flag bit 0 serializes with i810_do_dma. */
static void i810_dma_service(int irq, void *device, struct pt_regs *regs)
{
	drm_device_t	 *dev = (drm_device_t *)device;
	drm_device_dma_t *dma = dev->dma;

	atomic_inc(&dev->total_irq);
	if (i810_dma_is_ready(dev)) {
		/* Free previous buffer */
		if (test_and_set_bit(0, &dev->dma_flag)) {
			/* Someone else holds the DMA state; count the
			   miss and let them retire the buffer. */
			atomic_inc(&dma->total_missed_free);
			return;
		}
		if (dma->this_buffer) {
			drm_free_buffer(dev, dma->this_buffer);
			dma->this_buffer = NULL;
		}
		clear_bit(0, &dev->dma_flag);

		/* Dispatch new buffer */
		queue_task(&dev->tq, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
	}
}
/* Only called by i810_dma_schedule. */
/* Dispatch dma->next_buffer to the hardware.  Takes the HW lock for
 * buffers not marked while_locked (unless `locked` says the caller
 * already holds it), handles context switches, and tracks statistics.
 * Returns 0 on dispatch, -EBUSY/-EINVAL otherwise. */
static int i810_do_dma(drm_device_t *dev, int locked)
{
	unsigned long	 address;
	unsigned long	 length;
	drm_buf_t	 *buf;
	int		 retcode = 0;
	drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	 dma_start, dma_stop;
#endif

	/* dma_flag bit 0 serializes all DMA state manipulation. */
	if (test_and_set_bit(0, &dev->dma_flag)) {
		atomic_inc(&dma->total_missed_dma);
		return -EBUSY;
	}

#if DRM_DMA_HISTOGRAM
	dma_start = get_cycles();
#endif

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf	= dma->next_buffer;
	address = (unsigned long)buf->bus_address;
	length	= buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	/* Buffers flagged for reclaim are dropped, not dispatched. */
	if (buf->list == DRM_LIST_RECLAIM) {
		drm_clear_next_buffer(dev);
		drm_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	if (!length) {
		DRM_ERROR("0 length buffer\n");
		drm_clear_next_buffer(dev);
		drm_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!i810_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	if (buf->while_locked) {
		/* Caller promised to hold the HW lock for this buffer;
		   warn (but proceed) if it does not. */
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, buf->pid);
		}
	} else {
		if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
					      DRM_KERNEL_CONTEXT)) {
			atomic_inc(&dma->total_missed_lock);
			clear_bit(0, &dev->dma_flag);
			return -EBUSY;
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
		/* PRE: dev->last_context != buf->context */
		if (drm_context_switch(dev, dev->last_context, buf->context)) {
			drm_clear_next_buffer(dev);
			drm_free_buffer(dev, buf);
		}
		/* NOTE(review): -EBUSY is returned even when
		   drm_context_switch succeeded — presumably intentional,
		   since dispatch is retried once the switch completes. */
		retcode = -EBUSY;
		goto cleanup;

		/* POST: we will wait for the context
		   switch and will dispatch on a later call
		   when dev->last_context == buf->context.
		   NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
	}

	drm_clear_next_buffer(dev);
	buf->pending  = 1;
	buf->waiting  = 0;
	buf->list     = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
	buf->time_dispatched = get_cycles();
#endif

	i810_dma_dispatch(dev, address, length);
	/* Retire the previously dispatched buffer; remember this one. */
	drm_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_add(length, &dma->total_bytes);
	atomic_inc(&dma->total_dmas);

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
cleanup:

	clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
	dma_stop = get_cycles();
	atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]);
#endif

	return retcode;
}
/* Timer-callback adapter: the timer API passes the device pointer
 * as an unsigned long. */
static void i810_dma_schedule_timer_wrapper(unsigned long dev)
{
	i810_dma_schedule((drm_device_t *)dev, 0);
}
/* Task-queue adapter: the tq API passes the device pointer as void *. */
static void i810_dma_schedule_tq_wrapper(void *dev)
{
	i810_dma_schedule(dev, 0);
}
/* Top-level DMA scheduler: pick the next buffer (either a previously
 * deferred one or a fresh one via drm_select_queue) and dispatch it
 * with i810_do_dma.  Retries up to `expire` times while the engine is
 * ready.  Not reentrant (interrupt_flag bit 0 guards it). */
int i810_dma_schedule(drm_device_t *dev, int locked)
{
	int		 next;
	drm_queue_t	 *q;
	drm_buf_t	 *buf;
	int		 retcode   = 0;
	int		 processed = 0;
	int		 missed;
	int		 expire	   = 20;
	drm_device_dma_t *dma	   = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	 schedule_start;
#endif

	if (test_and_set_bit(0, &dev->interrupt_flag)) {
		/* Not reentrant */
		atomic_inc(&dma->total_missed_sched);
		return -EBUSY;
	}
	missed = atomic_read(&dma->total_missed_sched);

#if DRM_DMA_HISTOGRAM
	schedule_start = get_cycles();
#endif

again:
	if (dev->context_flag) {
		/* A context switch is in progress; back off. */
		clear_bit(0, &dev->interrupt_flag);
		return -EBUSY;
	}
	if (dma->next_buffer) {
		/* Unsent buffer that was previously
		   selected, but that couldn't be sent
		   because the lock could not be obtained
		   or the DMA engine wasn't ready.  Try
		   again. */
		atomic_inc(&dma->total_tried);
		if (!(retcode = i810_do_dma(dev, locked))) {
			atomic_inc(&dma->total_hit);
			++processed;
		}
	} else {
		do {
			/* Ask the core which queue should run next;
			   reclaim-marked buffers are discarded. */
			next = drm_select_queue(dev,
						i810_dma_schedule_timer_wrapper);
			if (next >= 0) {
				q   = dev->queuelist[next];
				buf = drm_waitlist_get(&q->waitlist);
				dma->next_buffer = buf;
				dma->next_queue	 = q;
				if (buf && buf->list == DRM_LIST_RECLAIM) {
					drm_clear_next_buffer(dev);
					drm_free_buffer(dev, buf);
				}
			}
		} while (next >= 0 && !dma->next_buffer);
		if (dma->next_buffer) {
			if (!(retcode = i810_do_dma(dev, locked))) {
				++processed;
			}
		}
	}

	if (--expire) {
		/* Loop again if we raced another scheduler attempt, or
		   if we dispatched something and the engine is ready. */
		if (missed != atomic_read(&dma->total_missed_sched)) {
			atomic_inc(&dma->total_lost);
			if (i810_dma_is_ready(dev)) goto again;
		}
		if (processed && i810_dma_is_ready(dev)) {
			atomic_inc(&dma->total_lost);
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles()
							   - schedule_start)]);
#endif
	return retcode;
}
/* Synchronous, high-priority DMA path: bypass the scheduler and
 * dispatch the caller's buffers directly, in order, handling HW-lock
 * acquisition and context switches inline.  Buffers must be owned by
 * the calling process and not already queued/pending. */
static int i810_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
	unsigned long	  address;
	unsigned long	  length;
	int		  must_free = 0;
	int		  retcode   = 0;
	int		  i;
	int		  idx;
	drm_buf_t	  *buf;
	drm_buf_t	  *last_buf = NULL;
	drm_device_dma_t  *dma	    = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

	/* Turn off interrupt handling */
	/* Busy-wait (with scheduling) for exclusive DMA access. */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) return -EINTR;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		/* Caller doesn't hold the HW lock: take it ourselves
		   and remember to release it (must_free). */
		while (!drm_lock_take(&dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT)) {
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}
		++must_free;
	}
	atomic_inc(&dma->total_prio);

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[ idx ];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using %d's buffer on list %d\n",
				  current->pid, buf->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
		/* This isn't a race condition on
		   buf->list, since our concern is the
		   buffer reclaim during the time the
		   process closes the /dev/drm? handle, so
		   it can't also be doing DMA. */
		buf->list	  = DRM_LIST_PRIO;
		buf->used	  = d->send_sizes[i];
		buf->context	  = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address		  = (unsigned long)buf->address;
		length		  = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			/* Sleep until the context switch completes. */
			add_wait_queue(&dev->context_wait, &entry);
			current->state = TASK_INTERRUPTIBLE;
			/* PRE: dev->last_context != buf->context */
			drm_context_switch(dev, dev->last_context,
					   buf->context);
			/* POST: we will wait for the context
			   switch and will dispatch on a later call
			   when dev->last_context == buf->context.
			   NOTE WE HOLD THE LOCK THROUGHOUT THIS
			   TIME! */
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&dev->context_wait, &entry);
			if (signal_pending(current)) {
				retcode = -EINTR;
				goto cleanup;
			}
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context,
					  buf->context);
			}
		}

#if DRM_DMA_HISTOGRAM
		buf->time_queued     = get_cycles();
		buf->time_dispatched = buf->time_queued;
#endif
		i810_dma_dispatch(dev, address, length);
		/* NOTE(review): drm_lock_free is called here on every
		   iteration even when must_free is 0 — looks suspicious;
		   confirm against the gamma driver's priority path. */
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}

		atomic_add(length, &dma->total_bytes);
		atomic_inc(&dma->total_dmas);

		if (last_buf) {
			drm_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}

cleanup:
	if (last_buf) {
		/* Wait for the final buffer, then retire it. */
		i810_dma_ready(dev);
		drm_free_buffer(dev, last_buf);
	}

	if (must_free && !dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}
/* Queue the caller's buffers through the normal scheduler path.  With
 * _DRM_DMA_BLOCK, sleeps until the last buffer in the batch has been
 * dispatched and retired. */
static int i810_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_buf_t	  *last_buf = NULL;
	int		  retcode   = 0;
	drm_device_dma_t  *dma	    = dev->dma;

	if (d->flags & _DRM_DMA_BLOCK) {
		/* Register on the last buffer's waitqueue BEFORE
		   enqueueing, so its completion cannot be missed. */
		last_buf = dma->buflist[d->send_indices[d->send_count-1]];
		add_wait_queue(&last_buf->dma_wait, &entry);
	}

	if ((retcode = drm_dma_enqueue(dev, d))) {
		if (d->flags & _DRM_DMA_BLOCK)
			remove_wait_queue(&last_buf->dma_wait, &entry);
		return retcode;
	}

	i810_dma_schedule(dev, 0);

	if (d->flags & _DRM_DMA_BLOCK) {
		DRM_DEBUG("%d waiting\n", current->pid);
		current->state = TASK_INTERRUPTIBLE;
		for (;;) {
			if (!last_buf->waiting
			    && !last_buf->pending)
				break; /* finished */
			schedule();
			if (signal_pending(current)) {
				retcode = -EINTR; /* Can't restart */
				break;
			}
		}
		current->state = TASK_RUNNING;
		DRM_DEBUG("%d running\n", current->pid);
		remove_wait_queue(&last_buf->dma_wait, &entry);
		if (!retcode
		    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
			/* Only free if nobody else is still waiting. */
			if (!waitqueue_active(&last_buf->dma_wait)) {
				drm_free_buffer(dev, last_buf);
			}
		}
		if (retcode) {
			DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
				  DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
				  last_buf->pid,
				  current->pid);
		}
	}
	return retcode;
}
/* DRM_IOCTL_DMA entry point: validate the request, send any buffers
 * the caller supplied (currently forced through the priority path —
 * see the #if 0 below), then optionally fetch fresh buffers. */
int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd,
	     unsigned long arg)
{
	drm_file_t	  *priv	    = filp->private_data;
	drm_device_t	  *dev	    = priv->dev;
	drm_device_dma_t  *dma	    = dev->dma;
	int		  retcode   = 0;
	drm_dma_t	  d;

	printk("i810_dma start\n");
	copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
	DRM_DEBUG("%d %d: %d send, %d req\n",
		  current->pid, d.context, d.send_count, d.request_count);

	/* The kernel context and out-of-range contexts are rejected. */
	if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) {
		DRM_ERROR("Process %d using context %d\n",
			  current->pid, d.context);
		return -EINVAL;
	}

	if (d.send_count < 0 || d.send_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
			  current->pid, d.send_count, dma->buf_count);
		return -EINVAL;
	}
	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  current->pid, d.request_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.send_count) {
		/* WIP: flag-based dispatch is disabled; everything goes
		   through the priority path for now. */
#if 0
		if (d.flags & _DRM_DMA_PRIORITY)
			retcode = i810_dma_priority(dev, &d);
		else
			retcode = i810_dma_send_buffers(dev, &d);
#endif
		printk("i810_dma priority\n");
		retcode = i810_dma_priority(dev, &d);
	}

	d.granted_count = 0;

	if (!retcode && d.request_count) {
		retcode = drm_dma_get_buffers(dev, &d);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  current->pid, d.granted_count);
	copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
	printk("i810_dma end (granted)\n");

	return retcode;
}
/* Claim the IRQ, reset the DMA bookkeeping, wire the scheduler into
 * the task queue, and install i810_dma_service as the handler.
 * Returns 0, -EINVAL (no irq), -EBUSY (already installed), or the
 * request_irq error.  Hardware-side interrupt enabling is still TODO. */
int i810_irq_install(drm_device_t *dev, int irq)
{
	int retcode;

	if (!irq) return -EINVAL;

	down(&dev->struct_sem);
	if (dev->irq) {
		up(&dev->struct_sem);
		return -EBUSY;
	}
	dev->irq = irq;		/* claimed under struct_sem */
	up(&dev->struct_sem);

	DRM_DEBUG("%d\n", irq);

	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag	    = 0;

	dev->dma->next_buffer = NULL;
	dev->dma->next_queue  = NULL;
	dev->dma->this_buffer = NULL;

	/* Task-queue hook used by the IRQ handler to defer scheduling. */
	dev->tq.next	= NULL;
	dev->tq.sync	= 0;
	dev->tq.routine = i810_dma_schedule_tq_wrapper;
	dev->tq.data	= dev;

	/* Before installing handler */
	/* TODO */

	/* Install handler */
	if ((retcode = request_irq(dev->irq,
				   i810_dma_service,
				   0,
				   dev->devname,
				   dev))) {
		/* Roll back the irq claim on failure. */
		down(&dev->struct_sem);
		dev->irq = 0;
		up(&dev->struct_sem);
		return retcode;
	}

	/* After installing handler */
	/* TODO */

	return 0;
}
/* Remove the i810 interrupt handler.
 *
 * Atomically claims and clears dev->irq under struct_sem so only one
 * caller performs the free_irq(); returns -EINVAL if no handler was
 * installed. */
int i810_irq_uninstall(drm_device_t *dev)
{
	int installed_irq;

	down(&dev->struct_sem);
	installed_irq = dev->irq;
	dev->irq = 0;
	up(&dev->struct_sem);

	if (!installed_irq) return -EINVAL;

	DRM_DEBUG("%d\n", installed_irq);
	/* TODO : Disable interrupts */
	free_irq(installed_irq, dev);
	return 0;
}
/* DRM_IOCTL_CONTROL handler: install or remove the interrupt handler
 * on behalf of the privileged client (see the i810_ioctls table).
 * Returns 0, -EFAULT on a bad user pointer, -EINVAL for an unknown
 * func, or the install/uninstall error code. */
int i810_control(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_control_t ctl;
	int retcode;
	printk(KERN_INFO "i810_control\n");	/* debug trace */
	/* NOTE(review): i810_dma_init() runs on EVERY control ioctl,
	   not just once at handler-install time -- confirm it is safe
	   to call repeatedly. */
	i810_dma_init(dev);
	copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
	switch (ctl.func) {
	case DRM_INST_HANDLER:
		if ((retcode = i810_irq_install(dev, ctl.irq)))
			return retcode;
		break;
	case DRM_UNINST_HANDLER:
		if ((retcode = i810_irq_uninstall(dev)))
			return retcode;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* DRM_IOCTL_LOCK handler: sleep until the hardware lock is acquired
 * for lock.context.
 *
 * After flushing/blocking per lock.flags, the caller waits
 * interruptibly on lock_queue until drm_lock_take() wins the lock or
 * a signal arrives.  A caller that just dropped the lock yields the
 * remainder of its DRM_LOCK_SLICE first, to avoid starving others.
 * On success, _DRM_LOCK_READY / _DRM_LOCK_QUIESCENT trigger the
 * matching i810 DMA state transitions.  Returns 0, -EINVAL for a bad
 * context, -EINTR if the device was unregistered, or -ERESTARTSYS on
 * a signal. */
int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	DECLARE_WAITQUEUE(entry, current);
	int ret = 0;
	drm_lock_t lock;
	drm_queue_t *q;
#if DRM_DMA_HISTOGRAM
	cycles_t start;
	dev->lck_start = start = get_cycles();
#endif
	copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
	/* User space may never take the lock as the kernel context. */
	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}
	if (lock.context < 0 || lock.context >= dev->queue_count) {
		return -EINVAL;
	}
	q = dev->queuelist[lock.context];
	ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
	if (!ret) {
		if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
		    != lock.context) {
			long j = jiffies - dev->lock.lock_time;
			if (j > 0 && j <= DRM_LOCK_SLICE) {
				/* Can't take lock if we just had it and
				   there is contention. */
				current->state = TASK_INTERRUPTIBLE;
				schedule_timeout(j);
			}
		}
		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				ret = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  lock.context)) {
				dev->lock.pid = current->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				atomic_inc(&q->total_locks);
				break;	/* Got lock */
			}
			/* Contention */
			/* NOTE(review): the task state is set to
			   TASK_INTERRUPTIBLE only after the failed lock
			   attempt -- check for a lost-wakeup window
			   between drm_lock_take() and schedule(). */
			atomic_inc(&dev->total_sleeps);
			current->state = TASK_INTERRUPTIBLE;
			schedule();
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
	}
	drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */
	if (!ret) {
		if (lock.flags & _DRM_LOCK_READY)
			i810_dma_ready(dev);
		if (lock.flags & _DRM_LOCK_QUIESCENT)
			i810_dma_quiescent(dev);
	}
#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif
	return ret;
}

583
linux/i810_drv.c Normal file
View File

@ -0,0 +1,583 @@
/* i810_drv.c -- I810 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.c,v 1.1 2000/02/11 17:26:05 dawes Exp $
*
*/
#define EXPORT_SYMTAB
#include "drmP.h"
#include "i810_drv.h"
EXPORT_SYMBOL(i810_init);
EXPORT_SYMBOL(i810_cleanup);

/* Driver identification reported through DRM_IOCTL_VERSION (see
 * i810_version).  The description previously read "Matrox g200/g400",
 * a copy/paste from the mga driver; this driver is for the Intel i810. */
#define I810_NAME	 "i810"
#define I810_DESC	 "Intel i810"
#define I810_DATE	 "19991213"
#define I810_MAJOR	 0
#define I810_MINOR	 0
#define I810_PATCHLEVEL	 1

static drm_device_t	      i810_device;	/* the single i810 device */
drm_ctx_t		      i810_res_ctx;

/* Character-device entry points; unimplemented operations fall back to
 * the kernel defaults. */
static struct file_operations i810_fops = {
	open:	 i810_open,
	flush:	 drm_flush,
	release: i810_release,
	ioctl:	 i810_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
};

/* Registered as a misc device with a dynamically assigned minor. */
static struct miscdevice      i810_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  I810_NAME,
	fops:  &i810_fops,
};
/* Dispatch table for i810_ioctl(), indexed by DRM_IOCTL_NR(cmd).
 * Each entry holds the handler plus the two permission flags consulted
 * in i810_ioctl(): auth_needed and root_only.
 * NOTE(review): confirm the 0/1 flag ordering against the
 * drm_ioctl_desc_t definition in drmP.h. */
static drm_ioctl_desc_t	      i810_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { i810_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,	  0, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	     = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]    = { i810_control,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]   = { i810_addbufs,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]  = { i810_markbufs,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]  = { i810_infobufs,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]   = { i810_mapbufs,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]  = { i810_freebufs,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { drm_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { drm_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { drm_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { drm_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { drm_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { drm_resctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_DMA)]	     = { i810_dma,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	     = { i810_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { i810_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]   = { drm_agp_info,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]  = { drm_agp_alloc,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]   = { drm_agp_free,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]   = { drm_agp_bind,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind,  1, 1 },
};
#define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls)
#ifdef MODULE
int init_module(void);
void cleanup_module(void);
static char *i810 = NULL;	/* option string passed at insmod time,
				   parsed by drm_parse_options() */
MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("Intel I810");
MODULE_PARM(i810, "s");
/* init_module is called when insmod is used to load the module */
int init_module(void)
{
	printk("doing i810_init()\n");	/* debug trace */
	return i810_init();
}
/* cleanup_module is called when rmmod is used to unload the module */
void cleanup_module(void)
{
	i810_cleanup();
}
#endif
#ifndef MODULE
/* i810_setup is called by the kernel to parse command-line options passed
 * via the boot-loader (e.g., LILO).  It calls the insmod option routine,
 * drm_parse_drm.
 *
 * This is not currently supported, since it requires changes to
 * linux/init/main.c. */
/* NOTE(review): this name collides with the static
 * i810_setup(drm_device_t *) defined later in this file when MODULE is
 * not set -- the two conflicting definitions will not compile in that
 * configuration.  Confirm and rename one of them. */
void __init i810_setup(char *str, int *ints)
{
	if (ints[0] != 0) {
		DRM_ERROR("Illegal command line format, ignored\n");
		return;
	}
	drm_parse_options(str);
}
#endif
/* First-open device initialization: reset all per-device counters,
 * lists, locks, and the debug-output ring to a clean state.  Runs each
 * time open_count goes 0 -> 1 (see i810_open). */
static int i810_setup(drm_device_t *dev)
{
	int i;
	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);
	drm_dma_setup(dev);
	/* Statistics counters exported via /proc. */
	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);
	/* Authentication-magic hash buckets. */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist = NULL;
	dev->map_count = 0;
	dev->vmalist = NULL;
	dev->lock.hw_lock = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count = 0;
	dev->queue_reserved = 0;
	dev->queue_slots = 0;
	dev->queuelist = NULL;
	dev->irq = 0;
	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;
	dev->last_context = 0;
	dev->last_switch = 0;
	dev->last_checked = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);
#if DRM_DMA_HISTO
	memset(&dev->histo, 0, sizeof(dev->histo));
#endif
	dev->ctx_start = 0;
	dev->lck_start = 0;
	/* Reset the drm_read()/debug output ring buffer. */
	dev->buf_rp = dev->buf;
	dev->buf_wp = dev->buf;
	dev->buf_end = dev->buf + DRM_BSZ;
	dev->buf_async = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);
	DRM_DEBUG("\n");
	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */
	return 0;
}
/* Tear down all device state built up since the first open: interrupt
 * handler, timer, name strings, magic list, AGP memory, vma list, maps
 * (with MTRR cleanup), and DMA queues.  Called on last release and
 * from the i810_init() failure path.  Caller must not hold struct_sem.
 * Always returns 0. */
static int i810_takedown(drm_device_t *dev)
{
	int i;
	drm_magic_entry_t *pt, *next;
	drm_map_t *map;
	drm_vma_entry_t *vma, *vma_next;
	DRM_DEBUG("\n");
	if (dev->irq) i810_irq_uninstall(dev);
	down(&dev->struct_sem);
	del_timer(&dev->timer);
	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}
	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}
	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;
		/* Remove AGP resources, but leave dev->agp
		   intact until i810_cleanup is called. */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound) drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;
		if (dev->agp->acquired && drm_agp.release)
			(*drm_agp.release)();
		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
	/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}
	/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			case _DRM_AGP:
				/* AGP backing store was freed above. */
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist = NULL;
		dev->map_count = 0;
	}
	if (dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {
			/* Test the entry for NULL BEFORE touching it: the
			   previous code destroyed the waitlist before the
			   NULL check, oopsing on a NULL queue slot. */
			if (dev->queuelist[i]) {
				drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist = NULL;
	}
	drm_dma_takedown(dev);
	dev->queue_count = 0;
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.pid = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);
	return 0;
}
/* i810_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported).
 *
 * Registers the misc device and brings up the memory, /proc, AGP, and
 * context-bitmap subsystems.  On context-bitmap failure everything
 * registered so far is unwound.  Returns 0 or a negative errno. */
int i810_init(void)
{
	int retcode;
	drm_device_t *dev = &i810_device;
	DRM_DEBUG("\n");
	memset((void *)dev, 0, sizeof(*dev));
	dev->count_lock = SPIN_LOCK_UNLOCKED;
	sema_init(&dev->struct_sem, 1);
#ifdef MODULE
	drm_parse_options(i810);	/* insmod "i810=..." option string */
#endif
	printk("doing misc_register\n");	/* debug trace */
	if ((retcode = misc_register(&i810_misc))) {
		DRM_ERROR("Cannot register \"%s\"\n", I810_NAME);
		return retcode;
	}
	dev->device = MKDEV(MISC_MAJOR, i810_misc.minor);
	dev->name = I810_NAME;
	printk("doing mem init\n");
	drm_mem_init();
	printk("doing proc init\n");
	drm_proc_init(dev);
	printk("doing agp init\n");
	dev->agp = drm_agp_init();
	printk("doing ctxbitmap init\n");
	if((retcode = drm_ctxbitmap_init(dev))) {
		/* Unwind in reverse order of initialization. */
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		drm_proc_cleanup();
		misc_deregister(&i810_misc);
		i810_takedown(dev);
		return retcode;
	}
#if 0
	printk("doing i810_dma_init\n");
	i810_dma_init(dev);
#endif
	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 I810_NAME,
		 I810_MAJOR,
		 I810_MINOR,
		 I810_PATCHLEVEL,
		 I810_DATE,
		 i810_misc.minor);
	return 0;
}
/* Module unload path: undo everything i810_init() established --
 * /proc entries, the misc device, the context bitmap, DMA state, and
 * finally the AGP bookkeeping left in place by i810_takedown(). */
void i810_cleanup(void)
{
	drm_device_t *device = &i810_device;

	DRM_DEBUG("\n");
	drm_proc_cleanup();
	if (!misc_deregister(&i810_misc)) {
		DRM_INFO("Module unloaded\n");
	} else {
		DRM_ERROR("Cannot unload module\n");
	}
	drm_ctxbitmap_cleanup(device);
	i810_dma_cleanup(device);
	i810_takedown(device);
	if (device->agp) {
		/* Freed here, not in i810_takedown(), so the AGP state
		   survives until the final unload. */
		drm_free(device->agp, sizeof(*device->agp), DRM_MEM_AGPLISTS);
		device->agp = NULL;
	}
}
/* DRM_IOCTL_VERSION handler: report driver name, date, description,
 * and version numbers.  For each string the full length is written
 * back to the user's *_len field (so a short buffer can be detected),
 * but at most the user-supplied length is copied into the buffer. */
int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_version_t version;
	int len;
	copy_from_user_ret(&version,
			   (drm_version_t *)arg,
			   sizeof(version),
			   -EFAULT);
	/* Wrapped in do { } while (0) so the multi-statement macro is a
	   single statement (safe under an unbraced if/else); the
	   copy_to_user_ret inside may still return -EFAULT from the
	   enclosing function. */
#define DRM_COPY(name,value)					\
	do {							\
		len = strlen(value);				\
		if (len > name##_len) len = name##_len;		\
		name##_len = strlen(value);			\
		if (len && name) {				\
			copy_to_user_ret(name, value, len,	\
					 -EFAULT);		\
		}						\
	} while (0)
	version.version_major = I810_MAJOR;
	version.version_minor = I810_MINOR;
	version.version_patchlevel = I810_PATCHLEVEL;
	DRM_COPY(version.name, I810_NAME);
	DRM_COPY(version.date, I810_DATE);
	DRM_COPY(version.desc, I810_DESC);
#undef DRM_COPY
	copy_to_user_ret((drm_version_t *)arg,
			 &version,
			 sizeof(version),
			 -EFAULT);
	return 0;
}
/* Character-device open.  drm_open_helper() attaches the per-file
 * private data; the first concurrent open (open_count 0 -> 1) also
 * runs i810_setup() to reset device state.  count_lock guards the
 * open_count transition. */
int i810_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev = &i810_device;
	int retcode = 0;
	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(inode, filp, dev))) {
		MOD_INC_USE_COUNT;
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return i810_setup(dev);	/* first opener initializes */
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}
/* Character-device release.  On the last close (open_count 1 -> 0)
 * the full device teardown runs via i810_takedown(), unless ioctls
 * are still in flight or clients are blocked, in which case -EBUSY
 * is returned and teardown is skipped.
 * NOTE(review): in the -EBUSY case MOD_DEC_USE_COUNT and the
 * open_count decrement have already happened -- confirm the counters
 * stay consistent for the next release. */
int i810_release(struct inode *inode, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	int retcode = 0;
	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_release(inode, filp))) {
		MOD_DEC_USE_COUNT;
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			return i810_takedown(dev);	/* last closer tears down */
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}
/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int i810_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= I810_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &i810_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
/* DRM_IOCTL_UNLOCK handler: release the hardware lock.  The lock is
 * first transferred to the kernel context so queued DMA can be
 * scheduled, then freed entirely unless a context switch is pending
 * (dev->context_flag). */
int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;
	copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
	/* The kernel context is never unlocked from user space. */
	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}
	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	/* Hand the lock to the kernel so DMA can run before release. */
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	i810_dma_schedule(dev, 1);
	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles()
						       - dev->lck_start)]);
#endif
	return 0;
}

76
linux/i810_drv.h Normal file
View File

@ -0,0 +1,76 @@
/* i810_drv.h -- Private header for the i810 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.h,v 1.1 2000/02/11 17:26:05 dawes Exp $
*/
#ifndef _I810_DRV_H_
#define _I810_DRV_H_
/* i810_drv.c -- module lifecycle, fops, and version ioctl */
extern int i810_init(void);
extern void i810_cleanup(void);
extern int i810_version(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
extern int i810_open(struct inode *inode, struct file *filp);
extern int i810_release(struct inode *inode, struct file *filp);
extern int i810_ioctl(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg);
extern int i810_unlock(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
/* i810_dma.c -- DMA scheduling, interrupt handling, lock ioctls */
extern int i810_dma_schedule(drm_device_t *dev, int locked);
extern int i810_dma(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg);
extern int i810_irq_install(drm_device_t *dev, int irq);
extern int i810_irq_uninstall(drm_device_t *dev);
extern int i810_control(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
extern int i810_lock(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg);
extern void i810_dma_init(drm_device_t *dev);
extern void i810_dma_cleanup(drm_device_t *dev);
/* i810_bufs.c -- buffer allocation/mapping ioctls */
extern int i810_addbufs(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
extern int i810_infobufs(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int i810_markbufs(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int i810_freebufs(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int i810_mapbufs(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
extern int i810_addmap(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
#endif

View File

@ -1,6 +1,6 @@
/* lock.c -- IOCTLs for locking -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
* Revised: Mon Dec 6 16:04:44 1999 by faith@precisioninsight.com
* Revised: Sun Feb 13 23:38:25 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.5 1999/08/30 13:05:00 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.2 1999/12/14 01:33:57 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/lock.c,v 1.4 2000/02/14 06:27:27 martin Exp $
*
*/

View File

@ -1,6 +1,6 @@
/* memory.c -- Memory management wrappers for DRM -*- linux-c -*-
* Created: Thu Feb 4 14:00:34 1999 by faith@precisioninsight.com
* Revised: Mon Dec 6 10:28:18 1999 by faith@precisioninsight.com
* Revised: Sun Feb 13 23:39:37 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.4 1999/08/20 20:00:53 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.2 1999/12/14 01:33:57 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/memory.c,v 1.4 2000/02/14 06:27:28 martin Exp $
*
*/

636
linux/mga_bufs.c Normal file
View File

@ -0,0 +1,636 @@
/* mga_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
* Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_bufs.c,v 1.1 2000/02/11 17:26:06 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include "mga_dma.h"
#include "linux/un.h"
/* DRM_IOCTL_ADD_BUFS backend for AGP-resident buffers.
 *
 * Carves request.count buffers of 2^order bytes out of the AGP
 * aperture starting at request.agp_start, links them into the device
 * buffer list and freelist, and copies the final count/size back to
 * the caller.  May only be called once per buffer order, and not
 * while buffers are in use (dev->buf_use) or queues exist.
 * Fix: agp_offset and offset are unsigned long, so the DRM_DEBUG
 * format specifiers must be %lu, not %d (undefined behavior in the
 * varargs call otherwise). */
int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
		    unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	if (!dma) return -EINVAL;
	copy_from_user_ret(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request),
			   -EFAULT);
	count = request.count;
	order = drm_order(request.size);
	size = 1 << order;
	agp_offset = request.agp_start;
	alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	byte_count = 0;
	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);	/* unsigned long */
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);
	DRM_DEBUG("byte_count: %d\n", byte_count);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
	if (dev->queue_count) return -EBUSY; /* Not while in use */
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);	/* blocks mga_infobufs until done */
	spin_unlock(&dev->count_lock);
	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}
	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
	entry->buf_size = size;
	entry->page_order = page_order;
	offset = 0;
	while(entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;
		DRM_DEBUG("offset : %lu\n", offset);	/* unsigned long */
		buf->offset = offset; /* Hrm */
		buf->bus_address = dev->agp->base + agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->agp->base);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->pid = 0;
		/* NOTE(review): drm_alloc() result is neither checked nor
		   zeroed -- confirm OOM handling before dev_private use. */
		buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t), DRM_MEM_BUFS);
		buf->dev_priv_size = sizeof(drm_mga_buf_priv_t);
#if DRM_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
		buf->time_completed = 0;
		buf->time_freed = 0;
#endif
		offset = offset + alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
		DRM_DEBUG("buffer %d @ %p\n",
			  entry->buf_count, buf->address);
	}
	/* NOTE(review): drm_realloc() return is not checked -- confirm
	   behavior on allocation failure. */
	dma->buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist),
				   DRM_MEM_BUFS);
	for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
		dma->buflist[i] = &entry->buflist[i - dma->buf_count];
	dma->buf_count += entry->buf_count;
	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	dma->byte_count += byte_count;
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
	drm_freelist_create(&entry->freelist, entry->buf_count);
	for (i = 0; i < entry->buf_count; i++) {
		drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
	}
	up(&dev->struct_sem);
	request.count = entry->buf_count;
	request.size = size;
	copy_to_user_ret((drm_buf_desc_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);
	atomic_dec(&dev->buf_alloc);
	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);	/* unsigned long */
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);
	DRM_DEBUG("byte_count: %d\n", byte_count);
	dma->flags = _DRM_DMA_USE_AGP;
	/* NOTE(review): check the type of dma->flags against %lx. */
	DRM_DEBUG("dma->flags : %lx\n", dma->flags);
	return 0;
}
/* DRM_IOCTL_ADD_BUFS backend for PCI (system-memory) buffers.
 *
 * Allocates request.count buffers of 2^order bytes from kernel pages,
 * records every page in dma->pagelist, and links the buffers into the
 * device buffer list and freelist.  May only be called once per order
 * and not while queues exist or buffers are mapped.
 * NOTE(review): if drm_alloc_pages() fails mid-way, the loop breaks
 * early and the caller sees the reduced count in request.count --
 * confirm callers handle partial allocation. */
int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
		    unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	if (!dma) return -EINVAL;
	copy_from_user_ret(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request),
			   -EFAULT);
	count = request.count;
	order = drm_order(request.size);
	size = 1 << order;
	DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
		  request.count, request.size, size, order, dev->queue_count);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
	if (dev->queue_count) return -EBUSY; /* Not while in use */
	alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) :size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);	/* blocks mga_infobufs until done */
	spin_unlock(&dev->count_lock);
	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}
	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist),
			 DRM_MEM_BUFS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));
	/* NOTE(review): drm_realloc() return is not checked. */
	dma->pagelist = drm_realloc(dma->pagelist,
				    dma->page_count * sizeof(*dma->pagelist),
				    (dma->page_count + (count << page_order))
				    * sizeof(*dma->pagelist),
				    DRM_MEM_PAGES);
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));
	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;
	while (entry->buf_count < count) {
		/* Partial allocation: stop when pages run out. */
		if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
		entry->seglist[entry->seg_count++] = page;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  page + PAGE_SIZE * i);
			dma->pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		/* Slice each segment into aligned buffers. */
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->pid = 0;
#if DRM_DMA_HISTOGRAM
			buf->time_queued = 0;
			buf->time_dispatched = 0;
			buf->time_completed = 0;
			buf->time_freed = 0;
#endif
			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}
	dma->buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist),
				   DRM_MEM_BUFS);
	for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
		dma->buflist[i] = &entry->buflist[i - dma->buf_count];
	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
	drm_freelist_create(&entry->freelist, entry->buf_count);
	for (i = 0; i < entry->buf_count; i++) {
		drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
	}
	up(&dev->struct_sem);
	request.count = entry->buf_count;	/* may be < requested */
	request.size = size;
	copy_to_user_ret((drm_buf_desc_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);
	atomic_dec(&dev->buf_alloc);
	return 0;
}
/* mga_addbufs -- DRM "add buffers" ioctl entry point.
 *
 * Peeks at the request flags to decide whether the DMA buffers should
 * be backed by AGP or PCI memory, then hands the whole ioctl off to
 * the matching helper unchanged.
 */
int mga_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_buf_desc_t desc;

	copy_from_user_ret(&desc,
			   (drm_buf_desc_t *)arg,
			   sizeof(desc),
			   -EFAULT);
	return (desc.flags & _DRM_AGP_BUFFER)
		? mga_addbufs_agp(inode, filp, cmd, arg)
		: mga_addbufs_pci(inode, filp, cmd, arg);
}
/* mga_infobufs -- ioctl: report the per-order DMA buffer pools to user space.
 *
 * For every buffer order that actually has buffers, copies out the buffer
 * count, buffer size and freelist low/high watermarks.  Also bumps
 * dev->buf_use so no further buffer allocation can happen afterwards.
 * Returns 0 on success, -EINVAL with no DMA state, -EBUSY if an
 * allocation is currently in flight, -EFAULT on a failed user copy.
 */
int mga_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	int i;
	int count;

	if (!dma) return -EINVAL;

	/* Refuse to report while an allocation is in progress; once
	 * buf_use is nonzero, allocation is locked out for good. */
	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	copy_from_user_ret(&request,
			   (drm_buf_info_t *)arg,
			   sizeof(request),
			   -EFAULT);

	/* First pass: how many orders are populated? */
	for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
		if (dma->bufs[i].buf_count) ++count;
	}
	DRM_DEBUG("count = %d\n", count);

	/* Second pass: fill the caller's list, but only if it provided
	 * enough slots; otherwise just report the required count. */
	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
			if (dma->bufs[i].buf_count) {
				copy_to_user_ret(&request.list[count].count,
						 &dma->bufs[i].buf_count,
						 sizeof(dma->bufs[0]
							.buf_count),
						 -EFAULT);
				copy_to_user_ret(&request.list[count].size,
						 &dma->bufs[i].buf_size,
						 sizeof(dma->bufs[0].buf_size),
						 -EFAULT);
				copy_to_user_ret(&request.list[count].low_mark,
						 &dma->bufs[i]
						 .freelist.low_mark,
						 sizeof(dma->bufs[0]
							.freelist.low_mark),
						 -EFAULT);
				copy_to_user_ret(&request.list[count]
						 .high_mark,
						 &dma->bufs[i]
						 .freelist.high_mark,
						 sizeof(dma->bufs[0]
							.freelist.high_mark),
						 -EFAULT);
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	copy_to_user_ret((drm_buf_info_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);
	return 0;
}
/* mga_markbufs -- ioctl: set freelist low/high watermarks for one pool.
 *
 * The requested buffer size is rounded up to its allocation order and
 * the watermarks are validated against that pool's buffer count before
 * being stored.  Returns 0 on success, -EINVAL for missing DMA state,
 * out-of-range order or watermarks, -EFAULT on a failed user copy.
 */
int mga_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!dma) return -EINVAL;

	copy_from_user_ret(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request),
			   -EFAULT);

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	/* Map size -> pool order and bounds-check it */
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
	entry = &dma->bufs[order];

	/* Watermarks may not exceed the number of buffers in the pool */
	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;
	return 0;
}
/* mga_freebufs -- ioctl: return a list of DMA buffers to the freelist.
 *
 * Each index in the user-supplied list is validated and the buffer is
 * only released if it is owned by the calling process.  Returns 0 on
 * success, -EINVAL for missing DMA state, a bad index or an ownership
 * mismatch, -EFAULT on a failed user copy.
 */
int mga_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!dma) return -EINVAL;

	copy_from_user_ret(&request,
			   (drm_buf_free_t *)arg,
			   sizeof(request),
			   -EFAULT);

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		/* Indices are fetched one at a time from user space */
		copy_from_user_ret(&idx,
				   &request.list[i],
				   sizeof(idx),
				   -EFAULT);
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		/* Only the owning process may free a buffer */
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d freeing buffer owned by %d\n",
				  current->pid, buf->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}
	return 0;
}
/* mga_mapbufs -- ioctl: map the whole DMA buffer pool into user space.
 *
 * Performs one do_mmap() covering every buffer (via the AGP buffer map
 * when AGP DMA is in use, otherwise via the device's PCI DMA area) and
 * then copies each buffer's index/size/offset into the caller's list.
 * Also bumps dev->buf_use, locking out further buffer allocation.
 *
 * Fix: corrected the misspelled "Buzy" debug message to "Busy".
 *
 * Returns 0 on success, -EINVAL for missing DMA state or a missing
 * buffer map, -EBUSY while an allocation is in flight, -EFAULT on a
 * failed user copy, or the (negative) do_mmap() error.
 */
int mga_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!dma) return -EINVAL;
	DRM_DEBUG("\n");

	/* Refuse while an allocation is in flight; afterwards, no more
	 * allocation is possible (buf_use is never decremented here). */
	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		DRM_DEBUG("Busy\n");
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	copy_from_user_ret(&request,
			   (drm_buf_map_t *)arg,
			   sizeof(request),
			   -EFAULT);
	DRM_DEBUG("mga_mapbufs\n");
	DRM_DEBUG("dma->flags : %lx\n", dma->flags);

	/* Only map if the caller provided room for every buffer */
	if (request.count >= dma->buf_count) {
		if(dma->flags & _DRM_DMA_USE_AGP) {
			/* AGP: mmap the buffer map recorded at init time */
			drm_mga_private_t *dev_priv = dev->dev_private;
			drm_map_t *map = NULL;

			map = dev->maplist[dev_priv->buffer_map_idx];
			if (!map) {
				DRM_DEBUG("map is null\n");
				retcode = -EINVAL;
				goto done;
			}
			DRM_DEBUG("map->offset : %lx\n", map->offset);
			DRM_DEBUG("map->size : %lx\n", map->size);
			DRM_DEBUG("map->type : %d\n", map->type);
			DRM_DEBUG("map->flags : %x\n", map->flags);
			DRM_DEBUG("map->handle : %lx\n", map->handle);
			DRM_DEBUG("map->mtrr : %d\n", map->mtrr);
			virtual = do_mmap(filp, 0, map->size, PROT_READ|PROT_WRITE,
					  MAP_SHARED, (unsigned long)map->offset);
		} else {
			/* PCI: mmap the contiguous DMA area at offset 0 */
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		}
		/* do_mmap() encodes errors as addresses in the top page;
		 * anything above -1024UL is an errno, not a mapping. */
		if (virtual > -1024UL) {
			/* Real error */
			DRM_DEBUG("mmap error\n");
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void *)virtual;

		/* Describe every buffer relative to the new mapping */
		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero,
					 sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;
			if (copy_to_user(&request.list[i].address,
					 &address,
					 sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	copy_to_user_ret((drm_buf_map_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);

	DRM_DEBUG("retcode : %d\n", retcode);

	return retcode;
}

417
linux/mga_clear.c Normal file
View File

@ -0,0 +1,417 @@
/* mga_state.c -- State support for mga g200/g400 -*- linux-c -*-
*
* Created: February 2000 by keithw@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keithw@precisioninsight.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include "mgareg_flags.h"
#include "mga_dma.h"
#include "mga_state.h"
#define MGA_CLEAR_CMD (DC_opcod_trap | DC_arzero_enable | \
DC_sgnzero_enable | DC_shftzero_enable | \
(0xC << DC_bop_SHIFT) | DC_clipdis_enable | \
DC_solid_enable | DC_transc_enable)
/* DWGCTL command word for the front<->back screen-to-screen blit.
 * Fix: removed a stray trailing backslash after the closing paren,
 * which line-spliced the following comment into this macro definition.
 */
#define MGA_COPY_CMD (DC_opcod_bitblt | DC_atype_rpl | DC_linear_xy | \
		      DC_solid_disable | DC_arzero_disable | \
		      DC_sgnzero_enable | DC_shftzero_enable | \
		      (0xC << DC_bop_SHIFT) | DC_bltmod_bfcol | \
		      DC_pattern_disable | DC_transc_disable | \
		      DC_clipdis_enable)
/* Build and queue a TT_GENERAL secondary buffer to do the clears.
* With Jeff's ringbuffer idea, it might make sense if there are only
* one or two cliprects to emit straight to the primary buffer.
*/
/* mgaClearBuffers -- build and enqueue a secondary DMA buffer that clears
 * the front/back/depth buffers (as selected by flags) over every cliprect
 * currently in the SAREA.  The buffer is submitted through the kernel
 * context queue and the DMA scheduler is kicked.
 * Returns 0 on success, -EINVAL if there are no cliprects.
 */
static int mgaClearBuffers(drm_device_t *dev,
			   int clear_color,
			   int clear_depth,
			   int flags)
{
	int cmd, i;
	drm_device_dma_t *dma = dev->dma;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	xf86drmClipRectRec *pbox = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	drm_buf_t *buf;
	drm_dma_t d;
	int order = 10; /* ??? what orders do we have ???*/
	DMALOCALS;

	if (!nbox)
		return -EINVAL;

	/* SGRAM boards get the block-mode clear; others use rstr.
	 * NOTE(review): exact atype semantics per the G200/G400 specs --
	 * confirm against the DWGCTL register documentation. */
	if ( dev_priv->sgram )
		cmd = MGA_CLEAR_CMD | DC_atype_blk;
	else
		cmd = MGA_CLEAR_CMD | DC_atype_rstr;

	/* Grab a buffer from the hard-coded order-10 pool (see above) */
	buf = drm_freelist_get(&dma->bufs[order].freelist, _DRM_DMA_WAIT);

	DMAGETPTR( buf );

	for (i = 0 ; i < nbox ; i++) {
		unsigned int height = pbox[i].y2 - pbox[i].y1;

		/* Is it necessary to be this paranoid?  I don't think so.
		if (pbox[i].x1 > dev_priv->width) continue;
		if (pbox[i].y1 > dev_priv->height) continue;
		if (pbox[i].x2 > dev_priv->width) continue;
		if (pbox[i].y2 > dev_priv->height) continue;
		if (pbox[i].x2 <= pbox[i].x1) continue;
		if (pbox[i].y2 <= pbox[i].x1) continue;
		*/

		/* Set the clear rectangle, then fire one clear per
		 * selected plane; the MGA_EXEC write launches the draw. */
		DMAOUTREG(MGAREG_YDSTLEN, (pbox[i].y1<<16)|height);
		DMAOUTREG(MGAREG_FXBNDRY, (pbox[i].x2<<16)|pbox[i].x1);

		if ( flags & MGA_CLEAR_FRONT ) {
			DMAOUTREG(MGAREG_FCOL, clear_color);
			DMAOUTREG(MGAREG_DSTORG, dev_priv->frontOrg);
			DMAOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
		}

		if ( flags & MGA_CLEAR_BACK ) {
			DMAOUTREG(MGAREG_FCOL, clear_color);
			DMAOUTREG(MGAREG_DSTORG, dev_priv->backOrg);
			DMAOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
		}

		if ( flags & MGA_CLEAR_DEPTH )
		{
			DMAOUTREG(MGAREG_FCOL, clear_depth);
			DMAOUTREG(MGAREG_DSTORG, dev_priv->depthOrg);
			DMAOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, cmd );
		}
	}

	DMAADVANCE( buf );

	/* Make sure we restore the 3D state next time.
	 */
	sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;

	((drm_mga_buf_priv_t *)buf->dev_private)->dma_type = MGA_DMA_GENERAL;

	/* Submit the single buffer through the kernel context queue */
	d.context = DRM_KERNEL_CONTEXT;
	d.send_count = 1;
	d.send_indices = &buf->idx;
	d.send_sizes = &buf->used;
	d.flags = 0;
	d.request_count = 0;
	d.request_size = 0;
	d.request_indices = NULL;
	d.request_sizes = NULL;
	d.granted_count = 0;

	/* pending_bufs is only kept incremented if the enqueue succeeded */
	atomic_inc(&dev_priv->pending_bufs);
	if((drm_dma_enqueue(dev, &d)) != 0)
		atomic_dec(&dev_priv->pending_bufs);
	mga_dma_schedule(dev, 1);
	return 0;
}
/* mgaSwapBuffers -- build and enqueue a secondary DMA buffer that blits
 * the back buffer to the front buffer over every SAREA cliprect, then
 * submits it through the kernel context queue and kicks the scheduler.
 * Returns 0 on success, -EINVAL if there are no cliprects.
 */
int mgaSwapBuffers(drm_device_t *dev, int flags)
{
	drm_device_dma_t *dma = dev->dma;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	xf86drmClipRectRec *pbox = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	drm_buf_t *buf;
	drm_dma_t d;
	int order = 10; /* ??? */
	int i;
	DMALOCALS;

	if (!nbox)
		return -EINVAL;

	buf = drm_freelist_get(&dma->bufs[order].freelist, _DRM_DMA_WAIT);
	DMAGETPTR(buf);

	/* Common blit setup: dest = front, src = back, copy raster op */
	DMAOUTREG(MGAREG_DSTORG, dev_priv->frontOrg);
	DMAOUTREG(MGAREG_MACCESS, dev_priv->mAccess);
	DMAOUTREG(MGAREG_SRCORG, dev_priv->backOrg);
	DMAOUTREG(MGAREG_AR5, dev_priv->stride); /* unnecessary? */
	DMAOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD);

	for (i = 0 ; i < nbox; i++) {
		unsigned int h = pbox[i].y2 - pbox[i].y1;
		/* start = linear offset of the box's first scanline */
		unsigned int start = pbox[i].y1 * dev_priv->stride;

		/*
		if (pbox[i].x1 > dev_priv->width) continue;
		if (pbox[i].y1 > dev_priv->height) continue;
		if (pbox[i].x2 > dev_priv->width) continue;
		if (pbox[i].y2 > dev_priv->height) continue;
		if (pbox[i].x2 <= pbox[i].x1) continue;
		if (pbox[i].y2 <= pbox[i].x1) continue;
		*/

		/* AR0/AR3 bound the source span; the MGA_EXEC write on
		 * YDSTLEN launches the blit for this rectangle. */
		DMAOUTREG(MGAREG_AR0, start + pbox[i].x2 - 1);
		DMAOUTREG(MGAREG_AR3, start + pbox[i].x1);
		DMAOUTREG(MGAREG_FXBNDRY, pbox[i].x1|((pbox[i].x2 - 1)<<16));
		DMAOUTREG(MGAREG_YDSTLEN+MGAREG_MGA_EXEC, (pbox[i].y1<<16)|h);
	}

	/* Restore SRCORG before handing the engine back */
	DMAOUTREG(MGAREG_SRCORG, 0);
	DMAADVANCE( buf );

	/* Make sure we restore the 3D state next time.
	 */
	sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;

	((drm_mga_buf_priv_t *)buf->dev_private)->dma_type = MGA_DMA_GENERAL;

	/* Submit the single buffer through the kernel context queue */
	d.context = DRM_KERNEL_CONTEXT;
	d.send_count = 1;
	d.send_indices = &buf->idx;
	d.send_sizes = &buf->used;
	d.flags = 0;
	d.request_count = 0;
	d.request_size = 0;
	d.request_indices = NULL;
	d.request_sizes = NULL;
	d.granted_count = 0;

	/* pending_bufs is only kept incremented if the enqueue succeeded */
	atomic_inc(&dev_priv->pending_bufs);
	if((drm_dma_enqueue(dev, &d)) != 0)
		atomic_dec(&dev_priv->pending_bufs);
	mga_dma_schedule(dev, 1);
	return 0;
}
/* mgaIload -- queue an image-load (texture upload) for an already-filled
 * DMA buffer.  Stashes the destination rectangle and register state in
 * the buffer's private data, computes the dword count from the rectangle
 * and pixel depth, and submits the buffer via the kernel context queue.
 * Returns 0 on success, -EINVAL for an unrecognized maccess pixel depth.
 */
static int mgaIload(drm_device_t *dev, drm_mga_iload_t *args)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf = dma->buflist[ args->idx ];
	drm_mga_buf_priv_t *buf_priv = (drm_mga_buf_priv_t *)buf->dev_private;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_dma_t d;
	int pixperdword;

	/* Record everything the interrupt-time dispatcher will need */
	buf_priv->dma_type = MGA_DMA_ILOAD;
	buf_priv->boxes[0].y1 = args->texture.y1;
	buf_priv->boxes[0].y2 = args->texture.y2;
	buf_priv->boxes[0].x1 = args->texture.x1;
	buf_priv->boxes[0].x2 = args->texture.x2;
	buf_priv->ContextState[MGA_CTXREG_DSTORG] = args->destOrg;
	buf_priv->ContextState[MGA_CTXREG_MACCESS] = args->mAccess;
	buf_priv->ServerState[MGA_2DREG_PITCH] = args->pitch;
	buf_priv->nbox = 1;
	sarea_priv->dirty |= (MGASAREA_NEW_CONTEXT | MGASAREA_NEW_2D);

	/* Low two bits of mAccess select the pixel depth -- presumably
	 * 0 = 8bpp, 1 = 16bpp, 2 = 32bpp; confirm against the MACCESS
	 * register documentation. */
	switch((args->mAccess & 0x00000003)) {
	case 0:
		pixperdword = 4;
		break;
	case 1:
		pixperdword = 2;
		break;
	case 2:
		pixperdword = 1;
		break;
	default:
		DRM_ERROR("Invalid maccess value passed"
			  " to mgaIload\n");
		return -EINVAL;
	}

	/* Bytes of image data = area / pixels-per-dword (dword units
	 * aside, this assumes the area divides evenly -- TODO confirm) */
	buf->used = ((args->texture.y2 - args->texture.y1) *
		     (args->texture.x2 - args->texture.x1) /
		     pixperdword);

	DRM_DEBUG("buf->used : %d\n", buf->used);

	/* Submit the single buffer through the kernel context queue */
	d.context = DRM_KERNEL_CONTEXT;
	d.send_count = 1;
	d.send_indices = &buf->idx;
	d.send_sizes = &buf->used;
	d.flags = 0;
	d.request_count = 0;
	d.request_size = 0;
	d.request_indices = NULL;
	d.request_sizes = NULL;
	d.granted_count = 0;

	/* pending_bufs is only kept incremented if the enqueue succeeded */
	atomic_inc(&dev_priv->pending_bufs);
	if((drm_dma_enqueue(dev, &d)) != 0)
		atomic_dec(&dev_priv->pending_bufs);
	mga_dma_schedule(dev, 1);
	return 0;
}
/* Placeholder for a hardware-lock check that was never implemented;
 * unconditionally reports the lock as held (all call sites are
 * currently commented out).
 */
static int check_lock(void)
{
	return 1;
}
/* mga_clear_bufs -- ioctl wrapper: copy in the clear request and
 * forward it to mgaClearBuffers().
 */
int mga_clear_bufs(struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg)
{
	drm_file_t *fpriv = filp->private_data;
	drm_device_t *dev = fpriv->dev;
	drm_mga_clear_t params;

	copy_from_user_ret(&params, (drm_mga_clear_t *)arg,
			   sizeof(params), -EFAULT);

	/* if (!check_lock( dev )) */
	/* 	return -EIEIO; */

	return mgaClearBuffers(dev, params.clear_color,
			       params.clear_depth,
			       params.flags);
}
/* mga_swap_bufs -- ioctl wrapper: copy in the swap request and
 * forward it to mgaSwapBuffers().
 */
int mga_swap_bufs(struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg)
{
	drm_file_t *fpriv = filp->private_data;
	drm_device_t *dev = fpriv->dev;
	drm_mga_swap_t params;

	/* if (!check_lock( dev )) */
	/* 	return -EIEIO; */

	copy_from_user_ret(&params, (drm_mga_swap_t *)arg,
			   sizeof(params), -EFAULT);

	return mgaSwapBuffers(dev, params.flags);
}
/* mga_iload -- ioctl wrapper: copy in the image-load request and
 * forward it to mgaIload().
 */
int mga_iload(struct inode *inode, struct file *filp,
	      unsigned int cmd, unsigned long arg)
{
	drm_file_t *fpriv = filp->private_data;
	drm_device_t *dev = fpriv->dev;
	drm_mga_iload_t params;

	/* if (!check_lock( dev )) */
	/* 	return -EIEIO; */

	copy_from_user_ret(&params, (drm_mga_iload_t *)arg,
			   sizeof(params), -EFAULT);

	return mgaIload(dev, &params);
}
/* mga_dma -- ioctl: dispatch at most one vertex buffer and/or request
 * fresh buffers.  All submissions are forced onto the kernel context
 * queue; the sarea state is snapshotted into the buffer before enqueue.
 * Returns 0 on success, -EINVAL for bad counts, -EFAULT on a failed
 * user copy, or an enqueue/get-buffers error code.
 */
int mga_dma(struct inode *inode, struct file *filp, unsigned int cmd,
	    unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	drm_dma_t d;

	copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
	DRM_DEBUG("%d %d: %d send, %d req\n",
		  current->pid, d.context, d.send_count, d.request_count);

	/* Per-context queues are unworkable if you are trying to do
	 * state management from the client: everything goes through
	 * the single kernel context.
	 */
	d.context = DRM_KERNEL_CONTEXT;
	d.flags &= ~_DRM_DMA_WHILE_LOCKED;

	/* Maybe multiple buffers is useful for iload...
	 * But this ioctl is only for *dispatching* vertex data...
	 */
	if (d.send_count < 0 || d.send_count > 1) {
		DRM_ERROR("Process %d trying to send %d buffers (max 1)\n",
			  current->pid, d.send_count);
		return -EINVAL;
	}

	/* But it *is* used to request buffers for all types of dma:
	 */
	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  current->pid, d.request_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.send_count) {
		/* NOTE(review): d.send_indices was copied from user space
		 * as a pointer and is dereferenced directly here without a
		 * copy_from_user -- verify this is safe on all targets. */
		int idx = d.send_indices[0];
		drm_mga_buf_priv_t *buf_priv = dma->buflist[ idx ]->dev_private;
		drm_mga_private_t *dev_priv = dev->dev_private;

		buf_priv->dma_type = MGA_DMA_VERTEX;

		/* if (!check_lock( dev )) */
		/* 	return -EIEIO; */

		/* Snapshot the relevent bits of the sarea...
		 */
		mgaCopyAndVerifyState( dev_priv, buf_priv );

		/* pending_bufs stays incremented only on successful enqueue */
		atomic_inc(&dev_priv->pending_bufs);
		retcode = drm_dma_enqueue(dev, &d);
		if(retcode != 0)
			atomic_dec(&dev_priv->pending_bufs);
		mga_dma_schedule(dev, 1);
	}

	d.granted_count = 0;

	if (!retcode && d.request_count) {
		retcode = drm_dma_get_buffers(dev, &d);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  current->pid, d.granted_count);
	copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);

	return retcode;
}

245
linux/mga_context.c Normal file
View File

@ -0,0 +1,245 @@
/* mga_context.c -- IOCTLs for mga contexts -*- linux-c -*-
* Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_context.c,v 1.1 2000/02/11 17:26:06 dawes Exp $
*
*/
#include <linux/sched.h>
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
/* mga_alloc_queue -- reserve the next free context handle from the
 * device's context bitmap (returns -1 when none are left, per
 * drm_ctxbitmap_next's contract as used by the callers below).
 */
static int mga_alloc_queue(drm_device_t *dev)
{
	int handle = drm_ctxbitmap_next(dev);

	printk("mga_alloc_queue: %d\n", handle);
	return handle;
}
/* mga_context_switch -- begin a switch from context `old` to `new`.
 * Serializes via the context_flag bit; a no-op if `new` is already
 * current.  With DRM_FLAG_NOCTX the switch completes immediately,
 * otherwise a "C old new" string is written out for user space
 * (presumably the X server) to act on -- confirm the reader.
 * Returns 0 on success, -EBUSY if a switch is already in progress.
 */
int mga_context_switch(drm_device_t *dev, int old, int new)
{
	char buf[64];

	atomic_inc(&dev->total_ctx);

	/* Only one switch may be in flight at a time */
	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

#if DRM_DMA_HISTOGRAM
	dev->ctx_start = get_cycles();
#endif

	printk("Context switch from %d to %d\n", old, new);

	if (new == dev->last_context) {
		/* Already current; release the flag and succeed */
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	if (drm_flags & DRM_FLAG_NOCTX) {
		mga_context_switch_complete(dev, new);
	} else {
		sprintf(buf, "C %d %d\n", old, new);
		drm_write_string(dev, buf);
	}

	return 0;
}
/* mga_context_switch_complete -- finish a context switch started by
 * mga_context_switch(): records the new current context, clears the
 * in-progress flag and wakes anyone waiting on the switch.
 * Always returns 0.
 */
int mga_context_switch_complete(drm_device_t *dev, int new)
{
	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch = jiffies;

	/* Sanity check only; the switch proceeds regardless */
	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	/* If a context switch is ever initiated
	   when the kernel holds the lock, release
	   that lock here. */
#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
						      - dev->ctx_start)]);
#endif
	clear_bit(0, &dev->context_flag);
	wake_up(&dev->context_wait);

	return 0;
}
/* mga_resctx -- ioctl: report the reserved context handles (0 ..
 * DRM_RESERVED_CONTEXTS-1) to user space.  If the caller's list is
 * large enough, each slot receives a zeroed drm_ctx_t whose handle is
 * the reserved index; the count is always reported back.
 *
 * Fix: the copy to user space previously passed &i with sizeof(i),
 * writing the raw loop counter over the drm_ctx_t slot instead of the
 * prepared `ctx` structure.  Now copies the whole drm_ctx_t.
 *
 * Returns 0 on success, -EFAULT on a failed user copy.
 */
int mga_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_ctx_res_t res;
	drm_ctx_t ctx;
	int i;

	printk("%d\n", DRM_RESERVED_CONTEXTS);
	copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
	if (res.count >= DRM_RESERVED_CONTEXTS) {
		memset(&ctx, 0, sizeof(ctx));
		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
			ctx.handle = i;
			copy_to_user_ret(&res.contexts[i],
					 &ctx,
					 sizeof(ctx),
					 -EFAULT);
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;
	copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
	return 0;
}
/* mga_addctx -- ioctl: allocate a new context handle for the caller.
 * Handle 0 (DRM_KERNEL_CONTEXT) is reserved, so if the bitmap hands it
 * out we immediately allocate again.  The resulting handle is copied
 * back to user space.
 * Returns 0 on success, -ENOMEM when the bitmap is exhausted (handle
 * == -1), -EFAULT on a failed user copy.
 */
int mga_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
	if ((ctx.handle = mga_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
		/* Skip kernel's context and get a new one. */
		ctx.handle = mga_alloc_queue(dev);
	}
	if (ctx.handle == -1) {
		printk("Not enough free contexts.\n");
		/* Should this return -EBUSY instead? */
		return -ENOMEM;
	}
	printk("%d\n", ctx.handle);
	copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
	return 0;
}
/* mga_modctx -- ioctl: modify a context's attributes.  The mga driver
 * keeps no per-context attributes, so this intentionally succeeds
 * without doing anything.
 */
int mga_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	/* This does nothing for the mga */
	return 0;
}
/* mga_getctx -- ioctl: query a context's flags.  The mga driver keeps
 * no per-context flags, so this always reports flags == 0 for the
 * requested handle.
 * Returns 0 on success, -EFAULT on a failed user copy.
 */
int mga_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_ctx_t ctx;

	copy_from_user_ret(&ctx, (drm_ctx_t*)arg, sizeof(ctx), -EFAULT);
	/* This is 0, because we don't handle any context flags */
	ctx.flags = 0;
	copy_to_user_ret((drm_ctx_t*)arg, &ctx, sizeof(ctx), -EFAULT);
	return 0;
}
/* mga_switchctx -- ioctl wrapper: copy in the target context and start
 * a switch to it from the current context.
 */
int mga_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t *fpriv = filp->private_data;
	drm_device_t *dev = fpriv->dev;
	drm_ctx_t req;

	copy_from_user_ret(&req, (drm_ctx_t *)arg, sizeof(req), -EFAULT);
	printk("%d\n", req.handle);
	return mga_context_switch(dev, dev->last_context, req.handle);
}
/* mga_newctx -- ioctl wrapper: user space reports that the context
 * switch to the given handle has finished; complete it kernel-side.
 */
int mga_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t *fpriv = filp->private_data;
	drm_device_t *dev = fpriv->dev;
	drm_ctx_t req;

	copy_from_user_ret(&req, (drm_ctx_t *)arg, sizeof(req), -EFAULT);
	printk("%d\n", req.handle);
	mga_context_switch_complete(dev, req.handle);
	return 0;
}
/* mga_rmctx -- ioctl: destroy a context handle.
 * For the kernel context this tears down the associated queue:
 * finalize it, drain queued buffers under the interrupt flag, and wake
 * every waiter.  For any other handle the bitmap slot is simply
 * released.
 * NOTE(review): the elaborate teardown runs only when handle ==
 * DRM_KERNEL_CONTEXT -- confirm that is the intended condition.
 * Returns 0 on success, -EINVAL for an unused queue, -EINTR on signal,
 * -EFAULT on a failed user copy.
 */
int mga_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;
	drm_queue_t *q;
	drm_buf_t *buf;

	copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
	printk("%d\n", ctx.handle);
	if(ctx.handle == DRM_KERNEL_CONTEXT) {
		q = dev->queuelist[ctx.handle];
		/* use_count == 1 means nobody holds the queue: nothing
		 * to finalize. */
		atomic_inc(&q->use_count);
		if (atomic_read(&q->use_count) == 1) {
			/* No longer in use */
			atomic_dec(&q->use_count);
			return -EINVAL;
		}

		atomic_inc(&q->finalization); /* Mark queue in finalization state */
		atomic_sub(2, &q->use_count);
		/* Mark queue as unused (pending finalization) */

		/* Take the interrupt flag so the bottom half can't race
		 * with us while we drain the waitlist. */
		while (test_and_set_bit(0, &dev->interrupt_flag)) {
			printk("Calling schedule from rmctx\n");
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}

		/* Remove queued buffers */
		while ((buf = drm_waitlist_get(&q->waitlist))) {
			drm_free_buffer(dev, buf);
		}

		clear_bit(0, &dev->interrupt_flag);

		/* Wakeup blocked processes */
		wake_up_interruptible(&q->read_queue);
		wake_up_interruptible(&q->write_queue);
		wake_up_interruptible(&q->flush_queue);

		/* Finalization over.  Queue is made
		   available when both use_count and
		   finalization become 0, which won't
		   happen until all the waiting processes
		   stop waiting. */
		atomic_dec(&q->finalization);
	} else {
		drm_ctxbitmap_free(dev, ctx.handle);
	}

	return 0;
}

978
linux/mga_dma.c Normal file
View File

@ -0,0 +1,978 @@
/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_dma.c,v 1.1 2000/02/11 17:26:07 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include "mgareg_flags.h"
#include "mga_dma.h"
#include "mga_state.h"
#include <linux/interrupt.h> /* For task queue support */
/* MMIO access helpers.  Every register lives in maplist entry 2 --
 * NOTE(review): the map index is hard-coded; confirm it matches the
 * map ordering established at device setup. */
#define MGA_REG(reg) 2
#define MGA_BASE(reg) ((unsigned long) \
		       ((drm_device_t *)dev)->maplist[MGA_REG(reg)]->handle)
#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
#define MGA_DEREF(reg) *(__volatile__ int *)MGA_ADDR(reg)
#define MGA_READ(reg) MGA_DEREF(reg)
#define MGA_WRITE(reg,val) do { MGA_DEREF(reg) = val; } while (0)

/* OR'ed into PRIMEND addresses -- presumably enables AGP transfers for
 * the primary DMA stream; confirm against the PRIMEND register docs. */
#define PDEA_pagpxfer_enable 0x2

/* Arbitrary tag written to DWGSYNC to detect engine sync (see
 * mga_dma_initialize and __mga_iload_small below). */
#define MGA_SYNC_TAG 0x423f4200

/* Transfer-type tag OR'ed into DMA addresses handed to the engine. */
typedef enum {
	TT_GENERAL,
	TT_BLIT,
	TT_VECTOR,
	TT_VERTEX
} transferType_t;
/* mga_delay -- intentional no-op used as a spin filler in the
 * engine-sync polling loops below.
 */
static void mga_delay(void)
{
}
/* mga_dma_cleanup -- release all driver-private DMA state: unmap the
 * ioremapped primary/warp area (if mapped) and free dev->dev_private.
 * Safe to call when no private state exists.  Always returns 0.
 */
int mga_dma_cleanup(drm_device_t *dev)
{
	if(dev->dev_private) {
		drm_mga_private_t *dev_priv =
			(drm_mga_private_t *) dev->dev_private;

		if(dev_priv->ioremap) {
			/* Size must match the page-rounded length used
			 * at drm_ioremap time in mga_dma_initialize */
			int temp = (dev_priv->warp_ucode_size +
				    dev_priv->primary_size +
				    PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;

			drm_ioremapfree((void *) dev_priv->ioremap, temp);
		}

		drm_free(dev->dev_private, sizeof(drm_mga_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;
	}

	return 0;
}
/* mga_alloc_kernel_queue -- ensure the kernel context (handle 0) has a
 * queue, allocating the queue and the one-slot queuelist if needed,
 * then (re)initialize the queue's counters and wait queues.
 * NOTE(review): when the queue already exists it is re-memset and its
 * waitlist re-created; also queue_count is incremented and then
 * decremented so the final printk reports queue_count - 1 -- confirm
 * this bookkeeping is intentional.
 * Returns DRM_KERNEL_CONTEXT on success, -ENOMEM on allocation failure.
 */
static int mga_alloc_kernel_queue(drm_device_t *dev)
{
	drm_queue_t *queue = NULL;

	/* Allocate a new queue */
	down(&dev->struct_sem);

	if(dev->queue_count != 0) {
		/* Reseting the kernel context here is not
		 * a race, since it can only happen when that
		 * queue is empty.
		 */
		queue = dev->queuelist[DRM_KERNEL_CONTEXT];
		printk("Kernel queue already allocated\n");
	} else {
		queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
		if(!queue) {
			up(&dev->struct_sem);
			printk("out of memory\n");
			return -ENOMEM;
		}
		++dev->queue_count;
		dev->queuelist = drm_alloc(sizeof(*dev->queuelist),
					   DRM_MEM_QUEUES);
		if(!dev->queuelist) {
			up(&dev->struct_sem);
			drm_free(queue, sizeof(*queue), DRM_MEM_QUEUES);
			printk("out of memory\n");
			return -ENOMEM;
		}
	}

	/* (Re)initialize the queue's counters, wait queues and waitlist */
	memset(queue, 0, sizeof(*queue));
	atomic_set(&queue->use_count, 1);
	atomic_set(&queue->finalization, 0);
	atomic_set(&queue->block_count, 0);
	atomic_set(&queue->block_read, 0);
	atomic_set(&queue->block_write, 0);
	atomic_set(&queue->total_queued, 0);
	atomic_set(&queue->total_flushed, 0);
	atomic_set(&queue->total_locks, 0);
	init_waitqueue_head(&queue->write_queue);
	init_waitqueue_head(&queue->read_queue);
	init_waitqueue_head(&queue->flush_queue);
	queue->flags = 0;

	drm_waitlist_create(&queue->waitlist, dev->dma->buf_count);

	dev->queue_slots = 1;
	dev->queuelist[DRM_KERNEL_CONTEXT] = queue;
	dev->queue_count--;

	up(&dev->struct_sem);
	printk("%d (new)\n", dev->queue_count - 1);
	return DRM_KERNEL_CONTEXT;
}
/* mga_dma_initialize -- set up the driver-private DMA state from a
 * user-supplied drm_mga_init_t: allocate dev_priv, validate the map
 * indices, create the kernel context queue, record the SAREA/primary
 * buffer layout, ioremap the warp/primary area and kick the engine
 * through one empty primary buffer to prime the status register.
 *
 * Fix: the failure branch after mga_alloc_kernel_queue() called
 * mga_dma_cleanup() -- which frees dev->dev_private -- but then fell
 * through and kept writing through the stale dev_priv pointer
 * (use-after-free).  It now returns -ENOMEM immediately.
 *
 * Returns 0 on success, -ENOMEM on allocation/ioremap/queue failure,
 * -EINVAL for invalid map indices.
 */
static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
	drm_mga_private_t *dev_priv;
	drm_map_t *prim_map = NULL;
	drm_map_t *sarea_map = NULL;
	int temp;

	dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
	if(dev_priv == NULL) return -ENOMEM;
	dev->dev_private = (void *) dev_priv;

	printk("dev_private\n");

	memset(dev_priv, 0, sizeof(drm_mga_private_t));
	atomic_set(&dev_priv->pending_bufs, 0);

	/* Map indices come from user space; validate before use */
	if((init->reserved_map_idx >= dev->map_count) ||
	   (init->buffer_map_idx >= dev->map_count)) {
		mga_dma_cleanup(dev);
		printk("reserved_map or buffer_map are invalid\n");
		return -EINVAL;
	}

	if(mga_alloc_kernel_queue(dev) != DRM_KERNEL_CONTEXT) {
		mga_dma_cleanup(dev);
		DRM_ERROR("Kernel context queue not present\n");
		/* dev_priv was just freed by mga_dma_cleanup(); bail out
		 * instead of falling through and using it (bug fix). */
		return -ENOMEM;
	}

	dev_priv->reserved_map_idx = init->reserved_map_idx;
	dev_priv->buffer_map_idx = init->buffer_map_idx;
	sarea_map = dev->maplist[0];
	dev_priv->sarea_priv = (drm_mga_sarea_t *)
		((u8 *)sarea_map->handle +
		 init->sarea_priv_offset);
	printk("sarea_priv\n");

	/* Scale primary size to the next page */
	dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) /
				  PAGE_SIZE) * PAGE_SIZE;
	dev_priv->warp_ucode_size = init->warp_ucode_size;
	dev_priv->chipset = init->chipset;
	dev_priv->fbOffset = init->fbOffset;
	dev_priv->backOffset = init->backOffset;
	dev_priv->depthOffset = init->depthOffset;
	dev_priv->textureOffset = init->textureOffset;
	dev_priv->textureSize = init->textureSize;
	dev_priv->cpp = init->cpp;
	dev_priv->sgram = init->sgram;
	dev_priv->stride = init->stride;

	dev_priv->frontOrg = init->frontOrg;
	dev_priv->backOrg = init->backOrg;
	dev_priv->depthOrg = init->depthOrg;
	dev_priv->mAccess = init->mAccess;

	printk("memcpy\n");
	memcpy(&dev_priv->WarpIndex, &init->WarpIndex,
	       sizeof(mgaWarpIndex) * MGA_MAX_WARP_PIPES);
	printk("memcpy done\n");

	prim_map = dev->maplist[init->reserved_map_idx];
	dev_priv->prim_phys_head = dev->agp->base + init->reserved_map_agpstart;

	/* Page-round the ioremap length (must match mga_dma_cleanup) */
	temp = init->warp_ucode_size + dev_priv->primary_size;
	temp = ((temp + PAGE_SIZE - 1) /
		PAGE_SIZE) * PAGE_SIZE;

	printk("temp : %x\n", temp);
	printk("dev->agp->base: %lx\n", dev->agp->base);
	printk("init->reserved_map_agpstart: %x\n", init->reserved_map_agpstart);

	dev_priv->ioremap = drm_ioremap(dev->agp->base + init->reserved_map_agpstart,
					temp);
	if(dev_priv->ioremap == NULL) {
		printk("Ioremap failed\n");
		mga_dma_cleanup(dev);
		return -ENOMEM;
	}

	dev_priv->prim_head = (u32 *)dev_priv->ioremap;
	printk("dev_priv->prim_head : %p\n", dev_priv->prim_head);
	dev_priv->current_dma_ptr = dev_priv->prim_head;
	dev_priv->prim_num_dwords = 0;
	dev_priv->prim_max_dwords = dev_priv->primary_size / 4;

	printk("dma initialization\n");

	/* Private is now filled in, initialize the hardware */
	{
		PRIMLOCALS;
		PRIMRESET( dev_priv );
		PRIMGETPTR( dev_priv );

		/* Minimal primary buffer: pads, a DWGSYNC clear and a
		 * SOFTRAP so the engine runs it and interrupts. */
		PRIMOUTREG(MGAREG_DMAPAD, 0);
		PRIMOUTREG(MGAREG_DMAPAD, 0);
		PRIMOUTREG(MGAREG_DWGSYNC, 0);
		PRIMOUTREG(MGAREG_SOFTRAP, 0);
		PRIMADVANCE( dev_priv );

		/* Poll for the first buffer to insure that
		 * the status register will be correct
		 */
		printk("phys_head : %lx\n", phys_head);

		MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);

		while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) {
			int i;
			for(i = 0 ; i < 4096; i++) mga_delay();
		}

		MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);

		MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
					   PDEA_pagpxfer_enable));

		/* Wait until the engine's DWGSYNC clear confirms the
		 * buffer was consumed */
		while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) {
			int i;
			for(i = 0; i < 4096; i++) mga_delay();
		}
	}

	printk("dma init was successful\n");
	return 0;
}
/* mga_dma_init -- ioctl: initialize or tear down the driver's DMA
 * state, depending on the requested function code.
 */
int mga_dma_init(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *fpriv = filp->private_data;
	drm_device_t *dev = fpriv->dev;
	drm_mga_init_t params;

	copy_from_user_ret(&params, (drm_mga_init_t *)arg, sizeof(params),
			   -EFAULT);

	if (params.func == MGA_INIT_DMA)
		return mga_dma_initialize(dev, &params);
	if (params.func == MGA_CLEANUP_DMA)
		return mga_dma_cleanup(dev);
	return -EINVAL;
}
#define MGA_ILOAD_CMD (DC_opcod_iload | DC_atype_rpl | \
DC_linear_linear | DC_bltmod_bfcol | \
(0xC << DC_bop_SHIFT) | DC_sgnzero_enable | \
DC_shftzero_enable | DC_clipdis_enable)
/* __mga_iload_small -- dispatch a small image load directly: builds a
 * primary DMA sequence that programs an ILOAD into a 1-pixel-high
 * linear destination, points the engine's secondary stream at the
 * buffer's bus address, then starts the primary stream.
 * NOTE(review): spins on DWGSYNC with no delay and no timeout before
 * starting the transfer; sarea_priv is only referenced inside the
 * "#if 0" block.  Reserved "__" prefix kept for consistency with the
 * original import.
 */
static void __mga_iload_small(drm_device_t *dev,
			      drm_buf_t *buf,
			      int use_agp)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	int length = buf->used;
	int y1 = buf_priv->boxes[0].y1;
	int x1 = buf_priv->boxes[0].x1;
	int y2 = buf_priv->boxes[0].y2;
	int x2 = buf_priv->boxes[0].x2;
	int dstorg = buf_priv->ContextState[MGA_CTXREG_DSTORG];
	int maccess = buf_priv->ContextState[MGA_CTXREG_MACCESS];
	PRIMLOCALS;

	PRIMRESET(dev_priv);
	PRIMGETPTR(dev_priv);

	/* Treat the rectangle as one linear span of (x2-x1)*(y2-y1)
	 * pixels; PITCH bit 15 presumably selects linear mode -- confirm
	 * against the PITCH register documentation. */
	PRIMOUTREG(MGAREG_DSTORG, dstorg | use_agp);
	PRIMOUTREG(MGAREG_MACCESS, maccess);
	PRIMOUTREG(MGAREG_PITCH, (1 << 15));
	PRIMOUTREG(MGAREG_YDST, y1 * (x2 - x1));
	PRIMOUTREG(MGAREG_LEN, 1);
	PRIMOUTREG(MGAREG_FXBNDRY, ((x2 - x1) * (y2 - y1) - 1) << 16);
	PRIMOUTREG(MGAREG_AR0, (x2 - x1) * (y2 - y1) - 1);
	PRIMOUTREG(MGAREG_AR3, 0);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, MGA_ILOAD_CMD);

	/* Secondary stream carries the actual image data */
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_SECADDRESS, address | TT_BLIT);
	PRIMOUTREG(MGAREG_SECEND, (address + length) | use_agp);

	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DWGSYNC, 0);
	PRIMOUTREG(MGAREG_SOFTRAP, 0);
	PRIMADVANCE(dev_priv);

#if 0
	/* For now we need to set this in the ioctl */
	sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;
#endif

	/* Sync with the engine, then start the primary stream */
	MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
	while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;

	MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
	MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
}
/* Upload an image region with a rectangular (x/y-addressed) ILOAD through
 * the primary DMA stream.  Selected by mga_dma_dispatch_iload() for wide
 * regions.  The horizontal extent is aligned to whole dwords of texels
 * (texperdword depends on the pixel depth encoded in MACCESS).
 */
static void __mga_iload_xy(drm_device_t *dev,
			   drm_buf_t *buf,
			   int use_agp)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	/* NOTE(review): sarea_priv is only referenced by the #if 0 block
	 * below, so it currently draws an unused-variable warning. */
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	int length = buf->used;
	int y1 = buf_priv->boxes[0].y1;
	int x1 = buf_priv->boxes[0].x1;
	int y2 = buf_priv->boxes[0].y2;
	int x2 = buf_priv->boxes[0].x2;
	int dstorg = buf_priv->ContextState[MGA_CTXREG_DSTORG];
	int maccess = buf_priv->ContextState[MGA_CTXREG_MACCESS];
	int pitch = buf_priv->ServerState[MGA_2DREG_PITCH];
	int width, height;
	int texperdword = 0;
	PRIMLOCALS;

	width = (x2 - x1);
	height = (y2 - y1);

	/* MACCESS bits 0-1 select the pixel size; presumably 8/16/32 bpp,
	 * giving 4/2/1 texels per 32-bit dword.  TODO confirm against the
	 * MGA register documentation. */
	switch((maccess & 0x00000003)) {
	case 0:
		texperdword = 4;
		break;
	case 1:
		texperdword = 2;
		break;
	case 2:
		texperdword = 1;
		break;
	default:
		DRM_ERROR("Invalid maccess value passed to __mga_iload_xy\n");
		return;
	}

	/* Round both edges up to dword granularity. */
	x2 = x1 + width;
	x2 = (x2 + (texperdword - 1)) & ~(texperdword - 1);
	x1 = (x1 + (texperdword - 1)) & ~(texperdword - 1);
	width = x2 - x1;

	PRIMRESET(dev_priv);
	PRIMGETPTR(dev_priv);
	PRIMOUTREG(MGAREG_DSTORG, dstorg | use_agp);
	PRIMOUTREG(MGAREG_MACCESS, maccess);
	PRIMOUTREG(MGAREG_PITCH, pitch);
	PRIMOUTREG(MGAREG_YDSTLEN, (y1 << 16) | height);
	PRIMOUTREG(MGAREG_FXBNDRY, ((x1+width-1) << 16) | x1);
	PRIMOUTREG(MGAREG_AR0, width * height - 1);
	PRIMOUTREG(MGAREG_AR3, 0 );
	/* MGA_EXEC makes the DWGCTL write also start the blit. */
	PRIMOUTREG(MGAREG_DWGCTL+MGAREG_MGA_EXEC, MGA_ILOAD_CMD);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	/* Secondary DMA window covering the user data in this buffer. */
	PRIMOUTREG(MGAREG_SECADDRESS, address | TT_BLIT);
	PRIMOUTREG(MGAREG_SECEND, (address + length) | use_agp);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DWGSYNC, 0);
	PRIMOUTREG(MGAREG_SOFTRAP, 0);	/* end-of-buffer marker */
	PRIMADVANCE(dev_priv);

#if 0
	/* For now we need to set this in the ioctl */
	sarea_priv->dirty |= MGASAREA_NEW_CONTEXT;
#endif

	/* Sync with the engine, then launch the primary buffer. */
	MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
	while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
	MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
	MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
}
/* Dispatch an image-load buffer, picking the upload strategy from the
 * width of the first cliprect: regions narrower than 32 pixels go
 * through the linear path, everything else through the rectangular
 * (xy) path.  AGP transfers are always enabled here.
 */
static void mga_dma_dispatch_iload(drm_device_t *dev, drm_buf_t *buf)
{
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	int use_agp = PDEA_pagpxfer_enable;
	int width = buf_priv->boxes[0].x2 - buf_priv->boxes[0].x1;

	if (width < 32) {
		printk("using iload small\n");
		__mga_iload_small(dev, buf, use_agp);
	} else {
		printk("using iload xy\n");
		__mga_iload_xy(dev, buf, use_agp);
	}
}
/* Dispatch a vertex buffer: emit the client's validated state, then
 * replay the secondary buffer once per cliprect (at least once even when
 * no cliprects are present), and finally launch the primary stream.
 */
static void mga_dma_dispatch_vertex(drm_device_t *dev, drm_buf_t *buf)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	unsigned long address = (unsigned long)buf->bus_address;
	int length = buf->used;
	int use_agp = PDEA_pagpxfer_enable;
	int i, count;
	PRIMLOCALS;

	PRIMRESET(dev_priv);

	/* Always run the vertex data at least once, even with no boxes. */
	count = buf_priv->nbox;
	if (count == 0)
		count = 1;

	mgaEmitState( dev_priv, buf_priv );

	for (i = 0 ; i < count ; i++) {
		if (i < buf_priv->nbox)
			mgaEmitClipRect( dev_priv, &buf_priv->boxes[i] );

		/* Re-read the primary pointers: the emit helpers above may
		 * have advanced them. */
		PRIMGETPTR(dev_priv);
		PRIMOUTREG( MGAREG_DMAPAD, 0);
		PRIMOUTREG( MGAREG_DMAPAD, 0);
		PRIMOUTREG( MGAREG_SECADDRESS, address | TT_VERTEX);
		PRIMOUTREG( MGAREG_SECEND, (address + length) | use_agp);
		PRIMOUTREG( MGAREG_DMAPAD, 0);
		PRIMOUTREG( MGAREG_DMAPAD, 0);
		PRIMOUTREG( MGAREG_DWGSYNC, 0);
		PRIMOUTREG( MGAREG_SOFTRAP, 0);
		PRIMADVANCE(dev_priv);
	}

	/* Refresh phys_head/num_dwords for the launch writes below. */
	PRIMGETPTR( dev_priv );

	MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
	while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
	MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
	MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
}
/* Used internally for the small buffers generated from client state
* information.
*/
/* Dispatch a general-purpose secondary buffer: wrap it in a minimal
 * primary sequence (secondary window + sync + softrap) and launch it.
 */
static void mga_dma_dispatch_general(drm_device_t *dev, drm_buf_t *buf)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	unsigned long address = (unsigned long)buf->bus_address;
	int length = buf->used;
	int use_agp = PDEA_pagpxfer_enable;
	PRIMLOCALS;

	PRIMRESET(dev_priv);
	PRIMGETPTR(dev_priv);
	PRIMOUTREG( MGAREG_DMAPAD, 0);
	PRIMOUTREG( MGAREG_DMAPAD, 0);
	PRIMOUTREG( MGAREG_SECADDRESS, address | TT_GENERAL);
	PRIMOUTREG( MGAREG_SECEND, (address + length) | use_agp);
	PRIMOUTREG( MGAREG_DMAPAD, 0);
	PRIMOUTREG( MGAREG_DMAPAD, 0);
	PRIMOUTREG( MGAREG_DWGSYNC, 0);
	PRIMOUTREG( MGAREG_SOFTRAP, 0);	/* end-of-buffer marker */
	PRIMADVANCE(dev_priv);

	/* Sync with the engine, then launch the primary buffer. */
	MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
	while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
	MGA_WRITE(MGAREG_PRIMADDRESS, dev_priv->prim_phys_head | TT_GENERAL);
	MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
}
/* Frees dispatch lock */
/* Busy-wait until the dispatch lock can be taken, drain the accelerator
 * (status poll plus a DWGSYNC round-trip), then release the lock again.
 * Used by mga_lock() to honor _DRM_LOCK_QUIESCENT.
 */
static inline void mga_dma_quiescent(drm_device_t *dev)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

	/* Spin until we are the sole holder of the dispatch lock. */
	while(1) {
		atomic_inc(&dev_priv->dispatch_lock);
		if(atomic_read(&dev_priv->dispatch_lock) == 1) {
			break;
		} else {
			atomic_dec(&dev_priv->dispatch_lock);
		}
	}
	/* Poll the status register until the engine reports idle.
	 * NOTE(review): the mask/value 0x00020001/0x00020000 comes from the
	 * MGA status register layout -- confirm against hardware docs. */
	while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) ;
#if 0
	MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
#endif
	/* Full DWGSYNC round-trip to be sure all drawing has retired. */
	while(MGA_READ(MGAREG_DWGSYNC) == MGA_SYNC_TAG) ;
	MGA_WRITE(MGAREG_DWGSYNC, MGA_SYNC_TAG);
	while(MGA_READ(MGAREG_DWGSYNC) != MGA_SYNC_TAG) ;
	atomic_dec(&dev_priv->dispatch_lock);
}
/* Keeps dispatch lock held */
/* Try to take the dispatch lock.  Returns 1 with the lock HELD when the
 * engine is free, 0 (lock not held) when someone else owns it.  The
 * matching release happens in the interrupt handler.
 */
static inline int mga_dma_is_ready(drm_device_t *dev)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

	atomic_inc(&dev_priv->dispatch_lock);
	if (atomic_read(&dev_priv->dispatch_lock) != 1) {
		/* Contended: back off and report busy. */
		atomic_dec(&dev_priv->dispatch_lock);
		return 0;
	}
	/* We got the lock */
	return 1;
}
/* Probe the dispatch lock without keeping it: returns 1 if it could be
 * taken at this instant, 0 otherwise.  The counter is always restored
 * before returning.
 */
static inline int mga_dma_is_ready_no_hold(drm_device_t *dev)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	int was_free;

	atomic_inc(&dev_priv->dispatch_lock);
	was_free = (atomic_read(&dev_priv->dispatch_lock) == 1);
	atomic_dec(&dev_priv->dispatch_lock);
	return was_free;
}
/* Interrupt handler -- presumably fired by the SOFTRAP emitted at the end
 * of each primary buffer (TODO confirm).  Releases the dispatch lock
 * taken at dispatch time, acks the chip, frees the just-completed buffer
 * and kicks the scheduler from a bottom half.
 */
static void mga_dma_service(int irq, void *device, struct pt_regs *regs)
{
	drm_device_t *dev = (drm_device_t *)device;
	drm_device_dma_t *dma = dev->dma;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

	atomic_dec(&dev_priv->dispatch_lock);	/* engine is free again */
	atomic_inc(&dev->total_irq);
	MGA_WRITE(MGAREG_ICLEAR, 0xfa7);	/* ack the interrupt */

	/* Free previous buffer */
	if (test_and_set_bit(0, &dev->dma_flag)) {
		atomic_inc(&dma->total_missed_free);
		return;
	}
	if (dma->this_buffer) {
		drm_free_buffer(dev, dma->this_buffer);
		dma->this_buffer = NULL;
	}
	clear_bit(0, &dev->dma_flag);

	/* Dispatch new buffer */
	queue_task(&dev->tq, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}
/* Only called by mga_dma_schedule. */
/* Pull the next buffer off the queue and dispatch it to the hardware.
 *
 * Serializes against itself via dev->dma_flag, against the hardware via
 * the dispatch lock (held until the completion interrupt frees it), and
 * against other contexts via the DRM hardware lock unless the caller
 * already holds it (locked != 0).
 *
 * Returns 0 on success (including the benign 0-length-buffer case),
 * -EBUSY when the engine or flags are contended, -EINVAL on queue
 * inconsistencies or reclaimed buffers.
 */
static int mga_do_dma(drm_device_t *dev, int locked)
{
	drm_buf_t *buf;
	int retcode = 0;
	drm_device_dma_t *dma = dev->dma;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	drm_mga_buf_priv_t *buf_priv;

	printk("mga_do_dma\n");
	/* Non-reentrant: dma_flag bit 0 guards this whole function. */
	if (test_and_set_bit(0, &dev->dma_flag)) {
		atomic_inc(&dma->total_missed_dma);
		return -EBUSY;
	}

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf = dma->next_buffer;
	printk("context %d, buffer %d\n", buf->context, buf->idx);

	/* Buffers marked for reclaim are dropped, not dispatched. */
	if (buf->list == DRM_LIST_RECLAIM) {
		drm_clear_next_buffer(dev);
		drm_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}
	if (!buf->used) {
		DRM_ERROR("0 length buffer\n");
		drm_clear_next_buffer(dev);
		drm_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	/* Takes the dispatch lock on success; it stays held until the
	 * completion interrupt releases it. */
	if (mga_dma_is_ready(dev) == 0) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	/* Always hold the hardware lock while dispatching.
	 */
	if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT)) {
		atomic_inc(&dma->total_missed_lock);
		clear_bit(0, &dev->dma_flag);
		atomic_dec(&dev_priv->dispatch_lock);	/* undo is_ready */
		return -EBUSY;
	}

	dma->next_queue = dev->queuelist[DRM_KERNEL_CONTEXT];
	drm_clear_next_buffer(dev);
	buf->pending = 1;
	buf->waiting = 0;
	buf->list = DRM_LIST_PEND;
	buf_priv = buf->dev_private;

	printk("dispatch!\n");
	/* Route by the buffer type recorded at submission time. */
	switch (buf_priv->dma_type) {
	case MGA_DMA_GENERAL:
		mga_dma_dispatch_general(dev, buf);
		break;
	case MGA_DMA_VERTEX:
		mga_dma_dispatch_vertex(dev, buf);
		break;
/* 	case MGA_DMA_SETUP: */
/* 		mga_dma_dispatch_setup(dev, address, length); */
/* 		break; */
	case MGA_DMA_ILOAD:
		mga_dma_dispatch_iload(dev, buf);
		break;
	default:
		printk("bad buffer type %x in dispatch\n", buf_priv->dma_type);
		break;
	}
	atomic_dec(&dev_priv->pending_bufs);

	/* The previously dispatched buffer has now retired. */
	drm_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_add(buf->used, &dma->total_bytes);
	atomic_inc(&dma->total_dmas);

	if (!locked) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

	clear_bit(0, &dev->dma_flag);

	/* Wake anyone in mga_flush_queue() once the queue drains. */
	if(!atomic_read(&dev_priv->pending_bufs)) {
		wake_up_interruptible(&dev->queuelist[DRM_KERNEL_CONTEXT]->flush_queue);
	}

#if 0
	wake_up_interruptible(&dev->lock.lock_queue);
#endif

	/* We hold the dispatch lock until the interrupt handler
	 * frees it
	 */
	return retcode;
}
/* Timer-callback adapter: re-enters the scheduler without the HW lock. */
static void mga_dma_schedule_timer_wrapper(unsigned long dev)
{
	mga_dma_schedule((drm_device_t *)dev, 0);
}
/* Task-queue (bottom-half) adapter: re-enters the scheduler without the
 * HW lock.  Queued from the interrupt handler. */
static void mga_dma_schedule_tq_wrapper(void *dev)
{
	mga_dma_schedule(dev, 0);
}
/* Top-level DMA scheduler: pops buffers from the single kernel-context
 * wait list and hands them to mga_do_dma(), retrying (bounded by
 * 'expire') while the engine stays ready or new schedule requests were
 * missed.  Non-reentrant via dev->interrupt_flag.
 *
 * locked: nonzero when the caller already holds the DRM hardware lock.
 * Returns 0 or the last mga_do_dma() error.
 */
int mga_dma_schedule(drm_device_t *dev, int locked)
{
	drm_queue_t *q;
	drm_buf_t *buf;
	int retcode = 0;
	int processed = 0;
	int missed;
	int expire = 20;	/* cap on re-dispatch iterations */
	drm_device_dma_t *dma = dev->dma;

	printk("mga_dma_schedule\n");
	if (test_and_set_bit(0, &dev->interrupt_flag)) {
		/* Not reentrant */
		atomic_inc(&dma->total_missed_sched);
		return -EBUSY;
	}
	missed = atomic_read(&dma->total_missed_sched);

again:
	/* There is only one queue:
	 */
	if (!dma->next_buffer && DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
		q = dev->queuelist[DRM_KERNEL_CONTEXT];
		buf = drm_waitlist_get(&q->waitlist);
		dma->next_buffer = buf;
		dma->next_queue = q;
		/* Drop buffers that were reclaimed while waiting. */
		if (buf && buf->list == DRM_LIST_RECLAIM) {
			drm_clear_next_buffer(dev);
			drm_free_buffer(dev, buf);
		}
	}

	if (dma->next_buffer) {
		if (!(retcode = mga_do_dma(dev, locked)))
			++processed;
	}

	/* Try again if we succesfully dispatched a buffer, or if someone
	 * tried to schedule while we were working.
	 */
	if (--expire) {
		if (missed != atomic_read(&dma->total_missed_sched)) {
			atomic_inc(&dma->total_lost);
			if (mga_dma_is_ready_no_hold(dev))
				goto again;
		}

		if (processed && mga_dma_is_ready_no_hold(dev)) {
			atomic_inc(&dma->total_lost);
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);

	return retcode;
}
/* Install the DMA-completion interrupt handler on 'irq'.
 *
 * Claims dev->irq under struct_sem, resets the scheduler state and task
 * queue, masks/acks the chip before request_irq(), and enables interrupt
 * bit 0 afterwards.  Returns -EINVAL for irq 0, -EBUSY if a handler is
 * already installed, or the request_irq() error.
 */
int mga_irq_install(drm_device_t *dev, int irq)
{
	int retcode;

	if (!irq) return -EINVAL;

	down(&dev->struct_sem);
	if (dev->irq) {
		up(&dev->struct_sem);
		return -EBUSY;
	}
	dev->irq = irq;
	up(&dev->struct_sem);

	printk("install irq handler %d\n", irq);

	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;
	dev->dma->next_buffer = NULL;
	dev->dma->next_queue = NULL;
	dev->dma->this_buffer = NULL;
	dev->tq.next = NULL;
	dev->tq.sync = 0;
	dev->tq.routine = mga_dma_schedule_tq_wrapper;
	dev->tq.data = dev;

	/* Before installing handler */
	MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
	MGA_WRITE(MGAREG_IEN, 0);	/* all interrupts masked */

	/* Install handler */
	if ((retcode = request_irq(dev->irq,
				   mga_dma_service,
				   0,
				   dev->devname,
				   dev))) {
		/* Roll back the irq claim on failure. */
		down(&dev->struct_sem);
		dev->irq = 0;
		up(&dev->struct_sem);
		return retcode;
	}

	/* After installing handler */
	MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
	MGA_WRITE(MGAREG_IEN, 0x00000001);	/* enable softrap irq */
	return 0;
}
/* Remove the interrupt handler installed by mga_irq_install(): masks and
 * acks the chip, then releases the IRQ.  Returns -EINVAL if none was
 * installed.
 */
int mga_irq_uninstall(drm_device_t *dev)
{
	int irq;

	down(&dev->struct_sem);
	irq = dev->irq;
	dev->irq = 0;
	up(&dev->struct_sem);

	if (!irq) return -EINVAL;
	printk("remove irq handler %d\n", irq);

	/* Mask and ack before and after freeing the line. */
	MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
	MGA_WRITE(MGAREG_IEN, 0);
	MGA_WRITE(MGAREG_ICLEAR, 0xfa7);
	free_irq(irq, dev);
	return 0;
}
/* DRM_IOCTL_CONTROL handler: install or remove the interrupt handler on
 * behalf of user space.  Returns -EFAULT on a bad user pointer, -EINVAL
 * for an unknown function code.
 */
int mga_control(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_control_t ctl;

	copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);

	if (ctl.func == DRM_INST_HANDLER)
		return mga_irq_install(dev, ctl.irq);
	if (ctl.func == DRM_UNINST_HANDLER)
		return mga_irq_uninstall(dev);

	return -EINVAL;
}
/* Block until every pending buffer has been dispatched.
 *
 * Sleeps on the kernel-context flush queue (woken by mga_do_dma() when
 * pending_bufs hits zero), kicking the scheduler with the lock held on
 * each pass.  Returns 0 on success, -EINTR if a signal arrives.
 */
int mga_flush_queue(drm_device_t *dev)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_queue_t *q = dev->queuelist[DRM_KERNEL_CONTEXT];
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	int ret = 0;

	printk("mga_flush_queue\n");
	if(atomic_read(&dev_priv->pending_bufs) != 0) {
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(&q->flush_queue, &entry);
		for (;;) {
			if (!atomic_read(&dev_priv->pending_bufs)) break;
			printk("Calling schedule from flush_queue : %d\n",
			       atomic_read(&dev_priv->pending_bufs));
			/* locked=1: the caller (mga_lock) holds the HW lock. */
			mga_dma_schedule(dev, 1);
			schedule();
			if (signal_pending(current)) {
				ret = -EINTR; /* Can't restart */
				break;
			}
		}
		printk("Exited out of schedule from flush_queue\n");
		current->state = TASK_RUNNING;
		remove_wait_queue(&q->flush_queue, &entry);
	}

	return ret;
}
/* DRM_IOCTL_LOCK handler: take the hardware lock for a client context,
 * sleeping (interruptibly) on contention.  If _DRM_LOCK_QUIESCENT is
 * requested, the DMA queue is flushed and the engine drained after the
 * lock is acquired; on flush failure the lock is released again.
 *
 * Returns 0 with the lock held, -EINVAL for the kernel context or a
 * negative context, -EINTR/-ERESTARTSYS on interruption.
 */
int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
	     unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	DECLARE_WAITQUEUE(entry, current);
	int ret = 0;
	drm_lock_t lock;

	copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

	/* User space may never lock as the kernel context. */
	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	printk("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
	       lock.context, current->pid, dev->lock.hw_lock->lock,
	       lock.flags);

	if (lock.context < 0) {
		return -EINVAL;
	}

	/* Only one queue:
	 */

	if (!ret) {
		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				ret = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  lock.context)) {
				dev->lock.pid	    = current->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				break;	/* Got lock */
			}

			/* Contention */
			atomic_inc(&dev->total_sleeps);
			current->state = TASK_INTERRUPTIBLE;
			current->policy |= SCHED_YIELD;	/* yield before sleeping */
			printk("Calling lock schedule\n");
			schedule();
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
	}

	if (!ret) {
		if (lock.flags & _DRM_LOCK_QUIESCENT) {
			printk("_DRM_LOCK_QUIESCENT\n");
			/* Drain all queued DMA; drop the lock if the flush
			 * was interrupted. */
			ret = mga_flush_queue(dev);
			if(ret != 0) {
				drm_lock_free(dev, &dev->lock.hw_lock->lock,
					      lock.context);
			} else {
				mga_dma_quiescent(dev);
			}
		}
	}
	printk("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
	return ret;
}

112
linux/mga_dma.h Normal file
View File

@ -0,0 +1,112 @@
#ifndef MGA_DMA_H
#define MGA_DMA_H
#include "mga_drm_public.h"
/* Isn't this fun. This has to be fixed asap by emitting primary
* dma commands in the 'do_dma' ioctl.
*/
/* Per-buffer private data: the client state snapshot that must be
 * validated and emitted before the buffer is dispatched.  The state
 * fields mirror the layout of drm_mga_sarea_t (mga_drm_public.h).
 */
typedef struct {
	int dma_type;			/* MGA_DMA_GENERAL/VERTEX/SETUP/ILOAD */
	unsigned int ContextState[MGA_CTX_SETUP_SIZE];	/* drawing-context regs */
	unsigned int ServerState[MGA_2D_SETUP_SIZE];	/* 2d state (pitch) */
	unsigned int TexState[2][MGA_TEX_SETUP_SIZE];	/* per-unit texture regs */
	unsigned int WarpPipe;		/* requested WARP pipe */
	unsigned int dirty;		/* MGASAREA_NEW_* flags */
	unsigned int nbox;		/* valid entries in boxes[] */
	xf86drmClipRectRec boxes[MGA_NR_SAREA_CLIPRECTS];
} drm_mga_buf_priv_t;
#define MGA_DMA_GENERAL 0
#define MGA_DMA_VERTEX 1
#define MGA_DMA_SETUP 2
#define MGA_DMA_ILOAD 3
/* The MGA exposes two register apertures; ADRINDEX() converts a register
 * byte offset into the 8-bit index used in DMA command dwords (the high
 * bit selects the second aperture).
 */
#define DWGREG0		0x1c00
#define DWGREG0_END	0x1dff
#define DWGREG1		0x2c00
#define DWGREG1_END	0x2dff

/* Bug fix: the macro argument is now fully parenthesized so that an
 * expression argument (e.g. ADRINDEX(base + off) or a conditional)
 * expands with the intended precedence.
 */
#define ISREG0(r)	((r) >= DWGREG0 && (r) <= DWGREG0_END)
#define ADRINDEX0(r)	(u8)(((r) - DWGREG0) >> 2)
#define ADRINDEX1(r)	(u8)((((r) - DWGREG1) >> 2) | 0x80)
#define ADRINDEX(r)	(ISREG0(r) ? ADRINDEX0(r) : ADRINDEX1(r))
/* Macros for inserting commands into a secondary dma buffer.
*/
/* Locals required by the DMAGETPTR/DMAOUTREG/DMAADVANCE macros. */
#define DMALOCALS u8 tempIndex[4]; u32 *dma_ptr; \
	       int outcount, num_dwords;

/* Point dma_ptr at the first free dword of a secondary buffer. */
#define DMAGETPTR(buf) do { \
	dma_ptr = (u32 *)((u8 *)buf->address + buf->used); \
	outcount = 0; \
	num_dwords = buf->used / 4; \
} while(0)

/* Commit the dword count back into the buffer's 'used' byte count. */
#define DMAADVANCE(buf) do { \
	buf->used = num_dwords * 4; \
} while(0)

/* Queue one register write.  Commands are packed five dwords at a time:
 * one index dword holding four register indices, followed by the four
 * data dwords.  Values are staged at dma_ptr[1..4]; when four have
 * accumulated, the index dword is written at dma_ptr[0] and the window
 * slides forward.
 */
#define DMAOUTREG(reg, val) do { \
	tempIndex[outcount]=ADRINDEX(reg); \
	dma_ptr[++outcount] = val; \
	if (outcount == 4) { \
		outcount = 0; \
		dma_ptr[0] = *(u32 *)tempIndex; \
		dma_ptr+=5; \
		num_dwords += 5; \
	} \
}while (0)

/* Set nonzero for per-write debug output from PRIMOUTREG. */
#define VERBO 0
/* Primary buffer versions of above -- pretty similar really.
 */
/* Locals for the PRIM* macros; phys_head mirrors prim_phys_head so the
 * dispatchers can compute the PRIMEND address. */
#define PRIMLOCALS	u8 tempIndex[4]; u32 *dma_ptr; u32 phys_head; \
			int outcount, num_dwords

/* Rewind the primary buffer to empty. */
#define PRIMRESET(dev_priv) do {				\
	dev_priv->prim_num_dwords = 0;				\
	dev_priv->current_dma_ptr = dev_priv->prim_head;	\
} while (0)

/* Load the PRIM locals from the device state. */
#define PRIMGETPTR(dev_priv) do {		\
	dma_ptr = dev_priv->current_dma_ptr;	\
	phys_head = dev_priv->prim_phys_head;	\
	num_dwords = dev_priv->prim_num_dwords;	\
	outcount = 0;				\
} while (0)

/* Store the PRIM locals back into the device state. */
#define PRIMADVANCE(dev_priv)	do {		\
	dev_priv->prim_num_dwords = num_dwords;	\
	dev_priv->current_dma_ptr = dma_ptr;	\
} while (0)

/* Queue one register write into the primary buffer; same 1-index-dword +
 * 4-data-dword packing as DMAOUTREG above.  With VERBO set, traces each
 * write. */
#define PRIMOUTREG(reg, val) do {					\
	tempIndex[outcount]=ADRINDEX(reg);				\
	dma_ptr[1+outcount] = val;					\
	if( ++outcount == 4) {						\
		outcount = 0;						\
		dma_ptr[0] = *(u32 *)tempIndex;				\
		dma_ptr+=5;						\
		num_dwords += 5;					\
	}								\
	if (VERBO)							\
		printk(KERN_INFO					\
		       "OUT %x val %x dma_ptr %p nr_dwords %d\n",	\
		       outcount, ADRINDEX(reg), dma_ptr,		\
		       num_dwords);					\
}while (0)
#endif

218
linux/mga_drm_public.h Normal file
View File

@ -0,0 +1,218 @@
/* mga_drm_public.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
* Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Jeff Hartmann <jhartmann@precisioninsight.com>
* Keith Whitwell <keithw@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drm_public.h,v 1.1 2000/02/11 17:26:07 dawes Exp $
*/
#ifndef _MGA_DRM_PUBLIC_H_
#define _MGA_DRM_PUBLIC_H_
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
#define MGA_T2 0x8 /* multitexture */
#define MGA_WARP_TGZ 0
#define MGA_WARP_TGZF (MGA_F)
#define MGA_WARP_TGZA (MGA_A)
#define MGA_WARP_TGZAF (MGA_F|MGA_A)
#define MGA_WARP_TGZS (MGA_S)
#define MGA_WARP_TGZSF (MGA_S|MGA_F)
#define MGA_WARP_TGZSA (MGA_S|MGA_A)
#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
#define MGA_WARP_T2GZ (MGA_T2)
#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
#define MGA_MAX_G400_PIPES 16
#define MGA_MAX_G200_PIPES 8 /* no multitex */
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
#define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2
/* Describes one installed WARP microcode pipe (filled in by the server
 * via drm_mga_init_t.WarpIndex). */
typedef struct _drm_mga_warp_index {
   	int installed;		/* nonzero once the ucode is in place */
   	unsigned long phys_addr;	/* presumably physical address of the ucode -- confirm */
   	int size;		/* ucode size */
} mgaWarpIndex;
/* Argument block for DRM_IOCTL_MGA_INIT: everything the kernel needs to
 * take over DMA, passed down from the X server. */
typedef struct drm_mga_init {
	enum {
	   	MGA_INIT_DMA = 0x01,		/* set up DMA state */
	       	MGA_CLEANUP_DMA = 0x02		/* tear it down */
	} func;
   	int reserved_map_agpstart;	/* AGP offset of the reserved map */
   	int reserved_map_idx;		/* index into the DRM map list */
   	int buffer_map_idx;
   	int sarea_priv_offset;		/* offset of drm_mga_sarea_t in the SAREA */
   	int primary_size;		/* primary DMA buffer size in bytes */
   	int warp_ucode_size;
   	int fbOffset;			/* framebuffer layout */
   	int backOffset;
   	int depthOffset;
   	int textureOffset;
   	int textureSize;
	int cpp;			/* bytes per pixel */
	int stride;
	int sgram;			/* SGRAM vs SDRAM board */
	int chipset;			/* MGA_CARD_TYPE_G200/G400 */
   	mgaWarpIndex WarpIndex[MGA_MAX_WARP_PIPES];
	/* Redundant?
	 */
	int frontOrg;
	int backOrg;
	int depthOrg;
	int mAccess;
} drm_mga_init_t;
/* Cliprect shared with user space: half-open pixel rectangle
 * [x1,x2) x [y1,y2). */
typedef struct _xf86drmClipRectRec {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
} xf86drmClipRectRec;
#define MGA_CLEAR_FRONT 0x1
#define MGA_CLEAR_BACK 0x2
#define MGA_CLEAR_DEPTH 0x4
/* Each context has a state:
*/
#define MGA_CTXREG_DSTORG 0 /* validated */
#define MGA_CTXREG_MACCESS 1
#define MGA_CTXREG_PLNWT 2
#define MGA_CTXREG_DWGCTL 3
#define MGA_CTXREG_ALPHACTRL 4
#define MGA_CTXREG_FOGCOLOR 5
#define MGA_CTXREG_WFLAG 6
#define MGA_CTXREG_TDUAL0 7
#define MGA_CTXREG_TDUAL1 8
#define MGA_CTX_SETUP_SIZE 9
/* 2d state
*/
#define MGA_2DREG_PITCH 0
#define MGA_2D_SETUP_SIZE 1
/* Each texture unit has a state:
*/
#define MGA_TEXREG_CTL 0
#define MGA_TEXREG_CTL2 1
#define MGA_TEXREG_FILTER 2
#define MGA_TEXREG_BORDERCOL 3
#define MGA_TEXREG_ORG 4 /* validated */
#define MGA_TEXREG_ORG1 5
#define MGA_TEXREG_ORG2 6
#define MGA_TEXREG_ORG3 7
#define MGA_TEXREG_ORG4 8
#define MGA_TEXREG_WIDTH 9
#define MGA_TEXREG_HEIGHT 10
#define MGA_TEX_SETUP_SIZE 11
/* What needs to be changed for the current vertex dma buffer?
*/
#define MGASAREA_NEW_CONTEXT 0x1
#define MGASAREA_NEW_TEX0 0x2
#define MGASAREA_NEW_TEX1 0x4
#define MGASAREA_NEW_PIPE 0x8
#define MGASAREA_NEW_2D 0x10
/* Keep this small for testing
*/
#define MGA_NR_SAREA_CLIPRECTS 2
/* Upto 128 regions. Minimum region size of 256k.
*/
#define MGA_NR_TEX_REGIONS 128
#define MGA_MIN_LOG_TEX_GRANULARITY 18
/* One texture-memory region in the LRU list kept in the SAREA; next/prev
 * are indices into drm_mga_sarea_t.texList. */
typedef struct {
	unsigned char next, prev;	/* LRU list links (indices) */
	unsigned char in_use;
	int age;			/* monotonically increasing, cf. texAge */
} mgaTexRegion;
/* Per-device shared area (SAREA) layout: clients deposit their state and
 * cliprects here; the kernel reads only the upper portion. */
typedef struct
{
	unsigned int ContextState[MGA_CTX_SETUP_SIZE];	/* drawing-context regs */
	unsigned int ServerState[MGA_2D_SETUP_SIZE];	/* 2d state (pitch) */
	unsigned int TexState[2][MGA_TEX_SETUP_SIZE];	/* per-unit texture regs */
	unsigned int WarpPipe;		/* requested WARP pipe */
	unsigned int dirty;		/* MGASAREA_NEW_* flags */

	unsigned int nbox;		/* valid entries in boxes[] */
	xf86drmClipRectRec boxes[MGA_NR_SAREA_CLIPRECTS];

	/* kernel doesn't touch from here down */
	int ctxOwner;			/* context id owning the hardware */
	mgaTexRegion texList[MGA_NR_TEX_REGIONS+1];	/* texture LRU */
	int texAge;
} drm_mga_sarea_t;
/* Device specific ioctls:
*/
/* Argument for DRM_IOCTL_MGA_CLEAR: which buffers to clear and with what. */
typedef struct {
	int clear_color;
	int clear_depth;
	int flags;		/* MGA_CLEAR_FRONT/BACK/DEPTH */
} drm_mga_clear_t;

/* Argument for DRM_IOCTL_MGA_SWAP. */
typedef struct {
	int flags;		/* not actually used? */
} drm_mga_swap_t;

/* Argument for DRM_IOCTL_MGA_ILOAD: destination and source rectangle of
 * an image upload. */
typedef struct {
	unsigned int destOrg;	/* destination origin */
	unsigned int mAccess;	/* pixel-format register value */
	unsigned int pitch;
	xf86drmClipRectRec texture;	/* region being uploaded */
	int idx;		/* DMA buffer index holding the data */
} drm_mga_iload_t;
#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t)
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
#endif

576
linux/mga_drv.c Normal file
View File

@ -0,0 +1,576 @@
/* mga_drv.c -- Matrox g200/g400 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.c,v 1.1 2000/02/11 17:26:07 dawes Exp $
*
*/
#define EXPORT_SYMTAB
#include "drmP.h"
#include "mga_drv.h"
EXPORT_SYMBOL(mga_init);
EXPORT_SYMBOL(mga_cleanup);
#define MGA_NAME "mga"
#define MGA_DESC "Matrox g200/g400"
#define MGA_DATE "19991213"
#define MGA_MAJOR 0
#define MGA_MINOR 0
#define MGA_PATCHLEVEL 1
static drm_device_t mga_device;
drm_ctx_t mga_res_ctx;
/* File operations for the MGA character device; DRM generic handlers are
 * used wherever no driver-specific behavior is needed. */
static struct file_operations	mga_fops = {
	open:	 mga_open,
	flush:	 drm_flush,
	release: mga_release,
	ioctl:	 mga_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
};
/* Misc-device registration: the driver appears as a dynamically numbered
 * misc character device named "mga". */
static struct miscdevice	mga_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  MGA_NAME,
	fops:  &mga_fops,
};
/* ioctl dispatch table, indexed by ioctl number.  Each entry is
 * { handler, auth_needed, root_only }.
 */
static drm_ioctl_desc_t		mga_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]     = { mga_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]  = { drm_getunique,  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]   = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]   = { drm_irq_busid,  0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]  = { drm_setunique,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	      = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]     = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]     = { mga_control,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]  = { drm_authmagic,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]     = { drm_addmap,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]    = { mga_addbufs,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]   = { mga_markbufs,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]   = { mga_infobufs,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]    = { mga_mapbufs,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]   = { mga_freebufs,	  1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]     = { mga_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]      = { mga_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]     = { mga_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]     = { mga_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]  = { mga_switchctx,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]     = { mga_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]     = { mga_resctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]    = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]     = { drm_rmdraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_DMA)]	      = { mga_dma,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	      = { mga_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]      = { mga_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]      = { drm_finish,	  1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]  = { drm_agp_enable,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]    = { drm_agp_info,	   1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]   = { drm_agp_alloc,   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]    = { drm_agp_free,	   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]    = { drm_agp_bind,	   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]  = { drm_agp_unbind,  1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)]    = { mga_dma_init,	  1, 1 },
	/* Bug fix: SWAP and CLEAR were cross-wired -- a swap request ran
	 * mga_clear_bufs and a clear request ran mga_swap_bufs.  Each
	 * ioctl now dispatches to its matching handler.
	 */
	[DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)]    = { mga_swap_bufs,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)]   = { mga_clear_bufs, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)]   = { mga_iload,	  1, 1 },
};
#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
#ifdef MODULE
int init_module(void);
void cleanup_module(void);
static char *mga = NULL;
MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("Matrox g200/g400");
MODULE_PARM(mga, "s");
/* init_module is called when insmod is used to load the module */
/* init_module is called when insmod is used to load the module */
/* Delegates to mga_init(); returns its status to the module loader. */
int init_module(void)
{
	DRM_DEBUG("doing mga_init()\n");
	return mga_init();
}
/* cleanup_module is called when rmmod is used to unload the module */
/* cleanup_module is called when rmmod is used to unload the module */
void cleanup_module(void)
{
	mga_cleanup();
}
#endif
#ifndef MODULE
/* mga_setup is called by the kernel to parse command-line options passed
* via the boot-loader (e.g., LILO). It calls the insmod option routine,
* drm_parse_drm.
*
* This is not currently supported, since it requires changes to
* linux/init/main.c. */
/* Boot-time option parser (non-modular builds).
 *
 * NOTE(review): this non-static mga_setup(char *, int *) collides with
 * the static mga_setup(drm_device_t *) defined below when MODULE is not
 * set -- that is a redefinition error at compile time.  One of the two
 * needs renaming; left as-is here because callers of the static one are
 * outside this view.
 */
void __init mga_setup(char *str, int *ints)
{
	if (ints[0] != 0) {
		DRM_ERROR("Illegal command line format, ignored\n");
		return;
	}
	drm_parse_options(str);
}
#endif
/* Reset all per-open device state: counters, hash tables, queues, lock,
 * timers and the message ring buffer.  Called when the device is first
 * opened.  Always returns 0.
 *
 * NOTE(review): the name collides with the boot-time mga_setup() above
 * in non-modular builds; see the note there.
 */
static int mga_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);

	drm_dma_setup(dev);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	/* Kernel-message ring buffer used by drm_read(). */
	dev->buf_rp	    = dev->buf;
	dev->buf_wp	    = dev->buf;
	dev->buf_end	    = dev->buf + DRM_BSZ;
	dev->buf_async	    = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */

	return 0;
}
/* Tear down everything mga_setup() and subsequent ioctls built up:
 * interrupt handler, magic hashes, AGP memory, vma list, maps (with
 * their MTRRs and mappings), queues and the hardware lock.  Called on
 * last close.  Always returns 0.
 */
static int mga_takedown(drm_device_t *dev)
{
	int		  i;
	drm_magic_entry_t *pt, *next;
	drm_map_t	  *map;
	drm_vma_entry_t	  *vma, *vma_next;

	DRM_DEBUG("\n");

	if (dev->irq) mga_irq_uninstall(dev);

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
				/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}
				/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

				/* Remove AGP resources, but leave dev->agp
                                   intact until cleanup is called. */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound) drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired && drm_agp.release)
			(*drm_agp.release)();

		dev->agp->acquired = 0;
		dev->agp->enabled  = 0;
	}
				/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}
				/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			case _DRM_AGP:
				/* AGP maps are owned by the AGP teardown above. */
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	if (dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {
			drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
			if (dev->queuelist[i]) {
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist	 = NULL;
	}

	drm_dma_takedown(dev);

	dev->queue_count     = 0;
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock    = NULL; /* SHM removed */
		dev->lock.pid	     = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}
/* mga_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported).
 *
 * Zeroes the static mga_device, registers the misc character device,
 * then brings up the memory bookkeeping, /proc entries, AGP, and the
 * context bitmap.  If the context bitmap cannot be allocated, everything
 * registered so far is unwound.  Returns 0 or a negative errno.
 */
int mga_init(void)
{
	int	      retcode;
	drm_device_t *dev = &mga_device;

	DRM_DEBUG("\n");

	memset((void *)dev, 0, sizeof(*dev));
	dev->count_lock = SPIN_LOCK_UNLOCKED;
	sema_init(&dev->struct_sem, 1);

#ifdef MODULE
	/* 'mga' is presumably the module option string declared elsewhere
	   in mga_drv.c -- TODO confirm against the MODULE_PARM setup. */
	drm_parse_options(mga);
#endif
	DRM_DEBUG("doing misc_register\n");
	if ((retcode = misc_register(&mga_misc))) {
		DRM_ERROR("Cannot register \"%s\"\n", MGA_NAME);
		return retcode;
	}
	dev->device = MKDEV(MISC_MAJOR, mga_misc.minor);
	dev->name   = MGA_NAME;

	DRM_DEBUG("doing mem init\n");
	drm_mem_init();
	DRM_DEBUG("doing proc init\n");
	drm_proc_init(dev);
	DRM_DEBUG("doing agp init\n");
	dev->agp    = drm_agp_init();
	DRM_DEBUG("doing ctxbitmap init\n");
	if((retcode = drm_ctxbitmap_init(dev))) {
		/* Unwind in reverse order of setup. */
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		drm_proc_cleanup();
		misc_deregister(&mga_misc);
		mga_takedown(dev);
		return retcode;
	}
	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 MGA_NAME,
		 MGA_MAJOR,
		 MGA_MINOR,
		 MGA_PATCHLEVEL,
		 MGA_DATE,
		 mga_misc.minor);

	return 0;
}
/* mga_cleanup is called via cleanup_module at module unload time.
 *
 * Unregisters /proc and the misc device, frees the context bitmap and DMA
 * state, runs the generic takedown, and finally releases the dev->agp
 * structure that mga_takedown deliberately left allocated.
 */
void mga_cleanup(void)
{
	drm_device_t *dev = &mga_device;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	if (misc_deregister(&mga_misc)) {
		DRM_ERROR("Cannot unload module\n");
	} else {
		DRM_INFO("Module unloaded\n");
	}
	drm_ctxbitmap_cleanup(dev);
	mga_dma_cleanup(dev);
	mga_takedown(dev);
	if (dev->agp) {
		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}
}
/* mga_version -- DRM version ioctl: report the driver's version numbers and
 * copy the name/date/description strings out to user space.
 *
 * For each string the copy is clamped to the user-supplied buffer length,
 * but the full string length is written back into *_len -- presumably so a
 * caller with a short buffer can learn the required size; TODO confirm
 * against the libdrm drmGetVersion contract.
 */
int mga_version(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_version_t version;
	int	      len;

	copy_from_user_ret(&version,
			   (drm_version_t *)arg,
			   sizeof(version),
			   -EFAULT);

	/* Copy at most name_len bytes of 'value' to the user pointer in
	   'name', reporting the true string length back in name_len. */
#define DRM_COPY(name,value)				     \
	len = strlen(value);				     \
	if (len > name##_len) len = name##_len;		     \
	name##_len = strlen(value);			     \
	if (len && name) {				     \
		copy_to_user_ret(name, value, len, -EFAULT); \
	}

	version.version_major	   = MGA_MAJOR;
	version.version_minor	   = MGA_MINOR;
	version.version_patchlevel = MGA_PATCHLEVEL;

	DRM_COPY(version.name, MGA_NAME);
	DRM_COPY(version.date, MGA_DATE);
	DRM_COPY(version.desc, MGA_DESC);

	/* BUGFIX: the helper macro was left defined for the remainder of
	   the translation unit; scope it to this function. */
#undef DRM_COPY

	copy_to_user_ret((drm_version_t *)arg,
			 &version,
			 sizeof(version),
			 -EFAULT);
	return 0;
}
/* File-open entry point: delegate to drm_open_helper and, when this is the
 * very first opener, run the one-time per-device setup (mga_setup).
 * Returns 0 or the helper's/setup's error code.
 */
int mga_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev    = &mga_device;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(inode, filp, dev))) {
		MOD_INC_USE_COUNT;
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			/* First opener: drop the counter spinlock before
			   the (potentially sleeping) device setup. */
			spin_unlock(&dev->count_lock);
			return mga_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}
/* File-release entry point: delegate to drm_release and, when the last
 * reference goes away, tear the device back down via mga_takedown.
 * Refuses the final takedown with -EBUSY while an ioctl is still in
 * flight or the device is blocked.
 */
int mga_release(struct inode *inode, struct file *filp)
{
	drm_file_t   *priv   = filp->private_data;
	drm_device_t *dev    = priv->dev;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_release(inode, filp))) {
		MOD_DEC_USE_COUNT;
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				return -EBUSY;
			}
			/* Last closer and device quiescent: full takedown
			   (done outside the spinlock -- it can sleep). */
			spin_unlock(&dev->count_lock);
			return mga_takedown(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}
/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int mga_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= MGA_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &mga_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
/* DRM unlock ioctl: release the hardware lock held by the caller's context.
 * The lock is first transferred to the kernel context, the DMA scheduler is
 * kicked, and the lock is then freed from the kernel context.
 */
int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t   *priv = filp->private_data;
	drm_device_t *dev  = priv->dev;
	drm_lock_t    lock;

	copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

	/* User space must never act on behalf of the kernel context. */
	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	mga_dma_schedule(dev, 1);
	if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
			  DRM_KERNEL_CONTEXT)) {
		DRM_ERROR("\n");
	}

	return 0;
}

292
linux/mga_drv.h Normal file
View File

@ -0,0 +1,292 @@
/* mga_drv.h -- Private header for the Matrox g200/g400 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.h,v 1.1 2000/02/11 17:26:08 dawes Exp $
*/
#ifndef _MGA_DRV_H_
#define _MGA_DRV_H_
#include "mga_drm_public.h"
/* Per-device private state for the MGA DRM driver. */
typedef struct _drm_mga_private {
	int reserved_map_idx;	/* maplist index of a reserved map -- TODO confirm which */
	int buffer_map_idx;	/* maplist index of the DMA buffer map -- TODO confirm */
	drm_mga_sarea_t *sarea_priv;	/* shared area visible to user space */
	int primary_size;	/* size of the primary DMA buffer */
	int warp_ucode_size;	/* size of the WARP microcode */
	int chipset;		/* MGA_CARD_TYPE_G200 / _G400 (see mga_state.c) */
	int fbOffset;		/* front buffer offset */
	int backOffset;		/* back buffer offset */
	int depthOffset;	/* depth buffer offset */
	int textureOffset;	/* texture heap offset */
	int textureSize;	/* texture heap size */
	int cpp;		/* presumably bytes per pixel -- TODO confirm */
	int stride;		/* screen stride (used as multiplier for YTOP/YBOT) */
	int sgram;		/* nonzero for SGRAM boards -- TODO confirm use */
	int use_agp;		/* nonzero when DMA goes through AGP */
	mgaWarpIndex WarpIndex[MGA_MAX_G400_PIPES]; /* per-pipe WARP ucode addresses */
	__volatile__ unsigned long softrap_age;	/* updated by the SOFTRAP irq path -- TODO confirm */
	atomic_t dispatch_lock;	/* guards DMA dispatch */
	atomic_t pending_bufs;	/* buffers queued but not yet dispatched */
	void *ioremap;		/* kernel mapping of the DMA area */
	u32 *prim_head;		/* start of the primary DMA buffer */
	u32 *current_dma_ptr;	/* next free dword in the primary buffer */
	u32 prim_phys_head;	/* bus address of prim_head */
	int prim_num_dwords;	/* dwords currently in the primary buffer */
	int prim_max_dwords;	/* capacity of the primary buffer */

	/* Some validated register values:
	 */
	u32 frontOrg;		/* legal DSTORG for the front buffer */
	u32 backOrg;		/* legal DSTORG for the back buffer */
	u32 depthOrg;		/* depth buffer origin */
	u32 mAccess;		/* validated MACCESS value */
} drm_mga_private_t;
/* mga_drv.c -- module entry points and the file_operations handlers. */
extern int  mga_init(void);
extern void mga_cleanup(void);
extern int  mga_version(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
extern int  mga_open(struct inode *inode, struct file *filp);
extern int  mga_release(struct inode *inode, struct file *filp);
extern int  mga_ioctl(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg);
extern int  mga_unlock(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);

/* mga_dma.c -- DMA scheduling, interrupts, and the lock ioctls. */
extern int  mga_dma_schedule(drm_device_t *dev, int locked);
extern int  mga_dma(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg);
extern int  mga_irq_install(drm_device_t *dev, int irq);
extern int  mga_irq_uninstall(drm_device_t *dev);
extern int  mga_control(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
extern int  mga_lock(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg);

/* mga_dma_init handles both init and release (a dead "#if 0" pair of
 * void-returning prototypes with the same names was removed here -- it
 * conflicted with these live declarations). */
extern int  mga_dma_init(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int  mga_dma_cleanup(drm_device_t *dev);

/* mga_bufs.c -- buffer management ioctls. */
extern int  mga_addbufs(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
extern int  mga_infobufs(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int  mga_markbufs(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int  mga_freebufs(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg);
extern int  mga_mapbufs(struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg);
extern int  mga_addmap(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);

/* mga_state.c -- rendering ioctls. */
extern int  mga_clear_bufs(struct inode *inode, struct file *filp,
			   unsigned int cmd, unsigned long arg);
extern int  mga_swap_bufs(struct inode *inode, struct file *filp,
			  unsigned int cmd, unsigned long arg);
extern int  mga_iload(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg);

/* mga_context.c -- context management ioctls and switching. */
extern int  mga_resctx(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
extern int  mga_addctx(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
extern int  mga_modctx(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
extern int  mga_getctx(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
extern int  mga_switchctx(struct inode *inode, struct file *filp,
			  unsigned int cmd, unsigned long arg);
extern int  mga_newctx(struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg);
extern int  mga_rmctx(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg);

extern int  mga_context_switch(drm_device_t *dev, int old, int new);
extern int  mga_context_switch_complete(drm_device_t *dev, int new);
/* MGA drawing-engine register offsets.  These are the offsets written via
 * the PRIMOUTREG DMA stream and MMIO; values presumably follow the Matrox
 * G200/G400 specification -- do not edit by hand. */
#define MGAREG_MGA_EXEC 0x0100
#define MGAREG_AGP_PLL 0x1e4c
#define MGAREG_ALPHACTRL 0x2c7c
#define MGAREG_ALPHASTART 0x2c70
#define MGAREG_ALPHAXINC 0x2c74
#define MGAREG_ALPHAYINC 0x2c78
#define MGAREG_AR0 0x1c60
#define MGAREG_AR1 0x1c64
#define MGAREG_AR2 0x1c68
#define MGAREG_AR3 0x1c6c
#define MGAREG_AR4 0x1c70
#define MGAREG_AR5 0x1c74
#define MGAREG_AR6 0x1c78
#define MGAREG_BCOL 0x1c20
#define MGAREG_CXBNDRY 0x1c80
#define MGAREG_CXLEFT 0x1ca0
#define MGAREG_CXRIGHT 0x1ca4
#define MGAREG_DMAPAD 0x1c54
#define MGAREG_DR0_Z32LSB 0x2c50
#define MGAREG_DR0_Z32MSB 0x2c54
#define MGAREG_DR2_Z32LSB 0x2c60
#define MGAREG_DR2_Z32MSB 0x2c64
#define MGAREG_DR3_Z32LSB 0x2c68
#define MGAREG_DR3_Z32MSB 0x2c6c
#define MGAREG_DR0 0x1cc0
#define MGAREG_DR2 0x1cc8
#define MGAREG_DR3 0x1ccc
#define MGAREG_DR4 0x1cd0
#define MGAREG_DR6 0x1cd8
#define MGAREG_DR7 0x1cdc
#define MGAREG_DR8 0x1ce0
#define MGAREG_DR10 0x1ce8
#define MGAREG_DR11 0x1cec
#define MGAREG_DR12 0x1cf0
#define MGAREG_DR14 0x1cf8
#define MGAREG_DR15 0x1cfc
#define MGAREG_DSTORG 0x2cb8
#define MGAREG_DWG_INDIR_WT 0x1e80
#define MGAREG_DWGCTL 0x1c00
#define MGAREG_DWGSYNC 0x2c4c
#define MGAREG_FCOL 0x1c24
#define MGAREG_FIFOSTATUS 0x1e10
#define MGAREG_FOGCOL 0x1cf4
#define MGAREG_FOGSTART 0x1cc4
#define MGAREG_FOGXINC 0x1cd4
#define MGAREG_FOGYINC 0x1ce4
#define MGAREG_FXBNDRY 0x1c84
#define MGAREG_FXLEFT 0x1ca8
#define MGAREG_FXRIGHT 0x1cac
#define MGAREG_ICLEAR 0x1e18
#define MGAREG_IEN 0x1e1c
#define MGAREG_LEN 0x1c5c
#define MGAREG_MACCESS 0x1c04
#define MGAREG_MCTLWTST 0x1c08
#define MGAREG_MEMRDBK 0x1e44
#define MGAREG_OPMODE 0x1e54
#define MGAREG_PAT0 0x1c10
#define MGAREG_PAT1 0x1c14
#define MGAREG_PITCH 0x1c8c
#define MGAREG_PLNWT 0x1c1c
#define MGAREG_PRIMADDRESS 0x1e58
#define MGAREG_PRIMEND 0x1e5c
#define MGAREG_PRIMPTR 0x1e50
#define MGAREG_RST 0x1e40
#define MGAREG_SECADDRESS 0x2c40
#define MGAREG_SECEND 0x2c44
#define MGAREG_SETUPADDRESS 0x2cd0
#define MGAREG_SETUPEND 0x2cd4
#define MGAREG_SGN 0x1c58
#define MGAREG_SHIFT 0x1c50
#define MGAREG_SOFTRAP 0x2c48
#define MGAREG_SPECBSTART 0x2c98
#define MGAREG_SPECBXINC 0x2c9c
#define MGAREG_SPECBYINC 0x2ca0
#define MGAREG_SPECGSTART 0x2c8c
#define MGAREG_SPECGXINC 0x2c90
#define MGAREG_SPECGYINC 0x2c94
#define MGAREG_SPECRSTART 0x2c80
#define MGAREG_SPECRXINC 0x2c84
#define MGAREG_SPECRYINC 0x2c88
#define MGAREG_SRC0 0x1c30
#define MGAREG_SRC1 0x1c34
#define MGAREG_SRC2 0x1c38
#define MGAREG_SRC3 0x1c3c
#define MGAREG_SRCORG 0x2cb4
#define MGAREG_STATUS 0x1e14
#define MGAREG_STENCIL 0x2cc8
#define MGAREG_STENCILCTL 0x2ccc
#define MGAREG_TDUALSTAGE0 0x2cf8
#define MGAREG_TDUALSTAGE1 0x2cfc
#define MGAREG_TEST0 0x1e48
#define MGAREG_TEXBORDERCOL 0x2c5c
#define MGAREG_TEXCTL 0x2c30
#define MGAREG_TEXCTL2 0x2c3c
#define MGAREG_TEXFILTER 0x2c58
#define MGAREG_TEXHEIGHT 0x2c2c
#define MGAREG_TEXORG 0x2c24
#define MGAREG_TEXORG1 0x2ca4
#define MGAREG_TEXORG2 0x2ca8
#define MGAREG_TEXORG3 0x2cac
#define MGAREG_TEXORG4 0x2cb0
#define MGAREG_TEXTRANS 0x2c34
#define MGAREG_TEXTRANSHIGH 0x2c38
#define MGAREG_TEXWIDTH 0x2c28
#define MGAREG_TMR0 0x2c00
#define MGAREG_TMR1 0x2c04
#define MGAREG_TMR2 0x2c08
#define MGAREG_TMR3 0x2c0c
#define MGAREG_TMR4 0x2c10
#define MGAREG_TMR5 0x2c14
#define MGAREG_TMR6 0x2c18
#define MGAREG_TMR7 0x2c1c
#define MGAREG_TMR8 0x2c20
#define MGAREG_VBIADDR0 0x3e08
#define MGAREG_VBIADDR1 0x3e0c
#define MGAREG_VCOUNT 0x1e20
#define MGAREG_WACCEPTSEQ 0x1dd4
#define MGAREG_WCODEADDR 0x1e6c
#define MGAREG_WFLAG 0x1dc4
#define MGAREG_WFLAG1 0x1de0
#define MGAREG_WFLAGNB 0x1e64
#define MGAREG_WFLAGNB1 0x1e08
#define MGAREG_WGETMSB 0x1dc8
#define MGAREG_WIADDR 0x1dc0
#define MGAREG_WIADDR2 0x1dd8
#define MGAREG_WIADDRNB 0x1e60
#define MGAREG_WIADDRNB1 0x1e04
#define MGAREG_WIADDRNB2 0x1e00
#define MGAREG_WIMEMADDR 0x1e68
#define MGAREG_WIMEMDATA 0x2000
#define MGAREG_WIMEMDATA1 0x2100
#define MGAREG_WMISC 0x1e70
#define MGAREG_WR 0x2d00
#define MGAREG_WVRTXSZ 0x1dcc
#define MGAREG_XDST 0x1cb0
#define MGAREG_XYEND 0x1c44
#define MGAREG_XYSTRT 0x1c40
#define MGAREG_YBOT 0x1c9c
#define MGAREG_YDST 0x1c90
#define MGAREG_YDSTLEN 0x1c88
#define MGAREG_YDSTORG 0x1c94
#define MGAREG_YTOP 0x1c98
#define MGAREG_ZORG 0x1c0c
#endif

362
linux/mga_state.c Normal file
View File

@ -0,0 +1,362 @@
/* mga_state.c -- State support for mga g200/g400 -*- linux-c -*-
* Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Jeff Hartmann <jhartmann@precisioninsight.com>
* Keith Whitwell <keithw@precisioninsight.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_state.c,v 1.1 2000/02/11 17:26:08 dawes Exp $
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include "mgareg_flags.h"
#include "mga_dma.h"
#include "mga_state.h"
#include "drm.h"
/* Emit a clip rectangle into the primary DMA stream.  CXBNDRY packs the
 * right edge in the high 16 bits and the left edge in the low 16; YTOP and
 * YBOT are scaled by dev_priv->stride (presumably the pitch the engine
 * expects -- TODO confirm units).
 */
void mgaEmitClipRect( drm_mga_private_t *dev_priv, xf86drmClipRectRec *box )
{
	PRIMLOCALS;
	PRIMGETPTR( dev_priv );

	/* The G400 seems to have an issue with the second WARP not
	 * stalling clipper register writes. This bothers me, but the only
	 * way I could get it to never clip the last triangle under any
	 * circumstances is by inserting TWO dwgsync commands.
	 */
	if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
		PRIMOUTREG( MGAREG_DWGSYNC, 0 );
		PRIMOUTREG( MGAREG_DWGSYNC, 0 );
	}
	PRIMOUTREG( MGAREG_CXBNDRY, ((box->x2)<<16)|(box->x1) );
	PRIMOUTREG( MGAREG_YTOP, box->y1 * dev_priv->stride );
	PRIMOUTREG( MGAREG_YBOT, box->y2 * dev_priv->stride );

	PRIMADVANCE( dev_priv );
}
/* Emit the buffer's validated drawing-context registers into the primary
 * DMA stream.  On the G400 the WFLAG value is mirrored into WFLAG1 and the
 * two texture dual-stage registers are emitted as well.
 */
static void mgaEmitContext(drm_mga_private_t *dev_priv,
			   drm_mga_buf_priv_t *buf_priv)
{
	unsigned int *regs = buf_priv->ContextState;
	PRIMLOCALS;
	PRIMGETPTR( dev_priv );

	PRIMOUTREG( MGAREG_DSTORG, regs[MGA_CTXREG_DSTORG] );
	PRIMOUTREG( MGAREG_MACCESS, regs[MGA_CTXREG_MACCESS] );
	PRIMOUTREG( MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT] );
	PRIMOUTREG( MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL] );
	PRIMOUTREG( MGAREG_ALPHACTRL, regs[MGA_CTXREG_ALPHACTRL] );
	PRIMOUTREG( MGAREG_FOGCOL, regs[MGA_CTXREG_FOGCOLOR] );
	PRIMOUTREG( MGAREG_WFLAG, regs[MGA_CTXREG_WFLAG] );

	if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
		/* Same WFLAG value goes to both WARP flag registers. */
		PRIMOUTREG( MGAREG_WFLAG1, regs[MGA_CTXREG_WFLAG] );
		PRIMOUTREG( MGAREG_TDUALSTAGE0, regs[MGA_CTXREG_TDUAL0] );
		PRIMOUTREG( MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1] );
	}

	PRIMADVANCE( dev_priv );
}
/* Emit the G200's single texture unit state (TexState[0]) into the primary
 * DMA stream.  The 0x2d00-based writes target the WARP register file
 * (0x2d00 == MGAREG_WR) -- width/height are mirrored into WR slots 24/34.
 */
static void mgaG200EmitTex( drm_mga_private_t *dev_priv,
			    drm_mga_buf_priv_t *buf_priv )
{
	unsigned int *regs = buf_priv->TexState[0];
	PRIMLOCALS;
	PRIMGETPTR( dev_priv );

	PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
	PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
	PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
	PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
	PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
	PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
	PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
	PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
	PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
	PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
	PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );

	PRIMOUTREG(0x2d00 + 24*4, regs[MGA_TEXREG_WIDTH] );
	PRIMOUTREG(0x2d00 + 34*4, regs[MGA_TEXREG_HEIGHT] );

	PRIMADVANCE( dev_priv );
}
/* Emit G400 texture unit 0 state into the primary DMA stream.  The
 * 0x2d00-based writes target the WARP register file (MGAREG_WR); when the
 * pipe is not multitextured (no MGA_T2 bit), the unit-1 w/h slots (52/60)
 * are given a default value here instead of by mgaG400EmitTex1.
 */
static void mgaG400EmitTex0( drm_mga_private_t *dev_priv,
			     drm_mga_buf_priv_t *buf_priv )
{
	unsigned int *regs = buf_priv->TexState[0];
	int multitex = buf_priv->WarpPipe & MGA_T2;
	PRIMLOCALS;
	PRIMGETPTR( dev_priv );

	PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );
	PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
	PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
	PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
	PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
	PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
	PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
	PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
	PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
	PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
	PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );

	PRIMOUTREG(0x2d00 + 49*4, 0);
	PRIMOUTREG(0x2d00 + 57*4, 0);
	PRIMOUTREG(0x2d00 + 53*4, 0);
	PRIMOUTREG(0x2d00 + 61*4, 0);

	if (!multitex) {
		PRIMOUTREG(0x2d00 + 52*4, 0x40 );
		PRIMOUTREG(0x2d00 + 60*4, 0x40 );
	}

	PRIMOUTREG(0x2d00 + 54*4, regs[MGA_TEXREG_WIDTH] | 0x40 );
	PRIMOUTREG(0x2d00 + 62*4, regs[MGA_TEXREG_HEIGHT] | 0x40 );

	PRIMADVANCE( dev_priv );
}
/* TEXCTL2 bit that routes state to texture map 1 instead of map 0. */
#define TMC_map1_enable 0x80000000

/* Emit G400 texture unit 1 state (from the shared sarea's TexState[1]).
 * TEXCTL2 is written with TMC_map1_enable set so the following writes land
 * on unit 1, then written again without the bit at the end to restore
 * unit-0 addressing.
 */
static void mgaG400EmitTex1( drm_mga_private_t *dev_priv,
			     drm_mga_buf_priv_t *buf_priv )
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int *regs = sarea_priv->TexState[1];
	PRIMLOCALS;
	PRIMGETPTR(dev_priv);

	PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | TMC_map1_enable);
	PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL] );
	PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER] );
	PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL] );
	PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG] );
	PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1] );
	PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2] );
	PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3] );
	PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4] );
	PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH] );
	PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT] );

	/* WARP register file (0x2d00 == MGAREG_WR) slots for unit 1. */
	PRIMOUTREG(0x2d00 + 49*4, 0);
	PRIMOUTREG(0x2d00 + 57*4, 0);
	PRIMOUTREG(0x2d00 + 53*4, 0);
	PRIMOUTREG(0x2d00 + 61*4, 0);

	PRIMOUTREG(0x2d00 + 52*4, regs[MGA_TEXREG_WIDTH] | 0x40 );
	PRIMOUTREG(0x2d00 + 60*4, regs[MGA_TEXREG_HEIGHT] | 0x40 );

	/* Restore map-0 addressing. */
	PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] );

	PRIMADVANCE( dev_priv );
}
/* Load the G400 WARP pipe selected by the sarea: suspend the WARP engine,
 * program the vertex size (larger for multitexture pipes with MGA_T2 set),
 * seed the WARP register file, then restart the engine at the pipe's
 * microcode physical address.
 */
static void mgaG400EmitPipe(drm_mga_private_t *dev_priv,
			    drm_mga_buf_priv_t *buf_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pipe = sarea_priv->WarpPipe;
	float fParam = 12800.0f;
	PRIMLOCALS;
	PRIMGETPTR(dev_priv);

	PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend);

	/* Establish vertex size.
	 */
	if (pipe & MGA_T2) {
		PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001e09);
		PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x1e000000);
	} else {
		PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001807);
		PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x18000000);
	}

	PRIMOUTREG(MGAREG_WFLAG, 0);
	PRIMOUTREG(MGAREG_WFLAG1, 0);
	/* NOTE(review): writes the raw IEEE bit pattern of fParam; the cast
	   is a strict-aliasing type pun -- works with the era's compilers
	   but a memcpy would be the portable form. */
	PRIMOUTREG(0x2d00 + 56*4, *((u32 *)(&fParam)));
	PRIMOUTREG(MGAREG_DMAPAD, 0);
	PRIMOUTREG(MGAREG_DMAPAD, 0);

	PRIMOUTREG(0x2d00 + 49*4, 0);	/* Tex stage 0 */
	PRIMOUTREG(0x2d00 + 57*4, 0);	/* Tex stage 0 */
	PRIMOUTREG(0x2d00 + 53*4, 0);	/* Tex stage 1 */
	PRIMOUTREG(0x2d00 + 61*4, 0);	/* Tex stage 1 */

	PRIMOUTREG(0x2d00 + 54*4, 0x40); /* Tex stage 0 : w */
	PRIMOUTREG(0x2d00 + 62*4, 0x40); /* Tex stage 0 : h */
	PRIMOUTREG(0x2d00 + 52*4, 0x40); /* Tex stage 1 : w */
	PRIMOUTREG(0x2d00 + 60*4, 0x40); /* Tex stage 1 : h */

	/* DMA padding required due to hw bug */
	PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
	PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
	PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
	PRIMOUTREG(MGAREG_WIADDR2, (dev_priv->WarpIndex[pipe].phys_addr |
				    WIA_wmode_start | WIA_wagp_agp));
	PRIMADVANCE(dev_priv);
}
/* Load the G200 WARP pipe selected by the sarea: suspend the engine, set a
 * fixed vertex size, seed the WARP register file (0x2d00 == MGAREG_WR),
 * then restart at the pipe's microcode physical address.
 */
static void mgaG200EmitPipe( drm_mga_private_t *dev_priv,
			     drm_mga_buf_priv_t *buf_priv )
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pipe = sarea_priv->WarpPipe;
	PRIMLOCALS;
	PRIMGETPTR(dev_priv);

	PRIMOUTREG(MGAREG_WIADDR, WIA_wmode_suspend);
	PRIMOUTREG(MGAREG_WVRTXSZ, 7);
	PRIMOUTREG(MGAREG_WFLAG, 0);

	PRIMOUTREG(0x2d00 + 24*4, 0);	/* tex w/h */
	PRIMOUTREG(0x2d00 + 25*4, 0x100);
	PRIMOUTREG(0x2d00 + 34*4, 0);	/* tex w/h */
	PRIMOUTREG(0x2d00 + 42*4, 0xFFFF);
	PRIMOUTREG(0x2d00 + 60*4, 0xFFFF);

	/* DMA padding required due to hw bug */
	PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
	PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
	PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
	PRIMOUTREG(MGAREG_WIADDR, (dev_priv->WarpIndex[pipe].phys_addr |
				   WIA_wmode_start | WIA_wagp_agp));

	PRIMADVANCE(dev_priv);
}
/* Emit whatever hardware state the buffer's dirty flags mark as changed.
 * Context state is emitted first on both chips; the G400 additionally
 * handles the second texture unit, and each chip uses its own texture and
 * pipe emitters.
 */
void mgaEmitState( drm_mga_private_t *dev_priv, drm_mga_buf_priv_t *buf_priv )
{
	unsigned int flags   = buf_priv->dirty;
	int	     is_g400 = (dev_priv->chipset == MGA_CARD_TYPE_G400);

	if (flags & MGASAREA_NEW_CONTEXT)
		mgaEmitContext( dev_priv, buf_priv );

	if (is_g400) {
		if (flags & MGASAREA_NEW_TEX1)
			mgaG400EmitTex1( dev_priv, buf_priv );
		if (flags & MGASAREA_NEW_TEX0)
			mgaG400EmitTex0( dev_priv, buf_priv );
		if (flags & MGASAREA_NEW_PIPE)
			mgaG400EmitPipe( dev_priv, buf_priv );
	} else {
		if (flags & MGASAREA_NEW_TEX0)
			mgaG200EmitTex( dev_priv, buf_priv );
		if (flags & MGASAREA_NEW_PIPE)
			mgaG200EmitPipe( dev_priv, buf_priv );
	}
}
/* Disallow all write destinations except the front and back buffer.
 * Snapshot the sarea's context state into buf_priv after validating the
 * render target.  Returns 0 on success, -1 when DSTORG points anywhere
 * other than the front or back buffer.
 */
static int mgaCopyContext(drm_mga_private_t *dev_priv,
			  drm_mga_buf_priv_t *buf_priv)
{
	drm_mga_sarea_t *sarea	= dev_priv->sarea_priv;
	unsigned int	 dstorg	= sarea->ContextState[MGA_CTXREG_DSTORG];

	if (dstorg != dev_priv->frontOrg && dstorg != dev_priv->backOrg)
		return -1;

	memcpy(buf_priv->ContextState, sarea->ContextState,
	       sizeof(buf_priv->ContextState));
	return 0;
}
/* Disallow texture reads from PCI space.
 * Snapshot the sarea's texture-unit state into buf_priv after checking the
 * texture origin's address-space bits.  Returns 0 on success, -1 when the
 * texture lives in PCI space.
 */
static int mgaCopyTex(drm_mga_private_t *dev_priv,
		      drm_mga_buf_priv_t *buf_priv,
		      int unit)
{
	drm_mga_sarea_t *sarea = dev_priv->sarea_priv;
	unsigned int	 org   = sarea->TexState[unit][MGA_TEXREG_ORG];

	if ((org & 0x3) == 0x1)
		return -1;

	memcpy(buf_priv->TexState[unit], sarea->TexState[unit],
	       sizeof(buf_priv->TexState[0]));
	return 0;
}
/* Snapshot and validate the dirty state from the shared sarea into
 * buf_priv.  Returns nonzero (true) when all copied state is acceptable,
 * 0 when any check failed.
 */
int mgaCopyAndVerifyState( drm_mga_private_t *dev_priv,
			   drm_mga_buf_priv_t *buf_priv )
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty ;
	int rv = 0;

	buf_priv->dirty = sarea_priv->dirty;
	buf_priv->WarpPipe = sarea_priv->WarpPipe;

	if (dirty & MGASAREA_NEW_CONTEXT)
		rv |= mgaCopyContext( dev_priv, buf_priv );

	if (dirty & MGASAREA_NEW_TEX0)
		rv |= mgaCopyTex( dev_priv, buf_priv, 0 );

	if (dev_priv->chipset == MGA_CARD_TYPE_G400)
	{
		if (dirty & MGASAREA_NEW_TEX1)
			rv |= mgaCopyTex( dev_priv, buf_priv, 1 );

		/* BUGFIX: WarpIndex[] has MGA_MAX_G400_PIPES entries, so the
		   largest valid pipe index is MAX-1; the original '>' test
		   let pipe == MAX through and EmitPipe would index past the
		   end of the array. */
		if (dirty & MGASAREA_NEW_PIPE)
			rv |= (buf_priv->WarpPipe >= MGA_MAX_G400_PIPES);
	}
	else
	{
		if (dirty & MGASAREA_NEW_PIPE)
			rv |= (buf_priv->WarpPipe >= MGA_MAX_G200_PIPES);
	}

	return rv == 0;
}

13
linux/mga_state.h Normal file
View File

@ -0,0 +1,13 @@
#ifndef MGA_STATE_H
#define MGA_STATE_H

#include "mga_drv.h"

/* Copy state flagged dirty in the sarea into buf_priv and validate it;
   returns nonzero when the state is acceptable (see mga_state.c). */
int mgaCopyAndVerifyState( drm_mga_private_t *dev_priv,
			   drm_mga_buf_priv_t *buf_priv );

/* Emit a clip rectangle into the primary DMA stream. */
void mgaEmitClipRect( drm_mga_private_t *dev_priv, xf86drmClipRectRec *box );

/* Emit whatever state buf_priv->dirty flags as changed. */
void mgaEmitState( drm_mga_private_t *dev_priv, drm_mga_buf_priv_t *buf_priv );

#endif

930
linux/mgareg_flags.h Normal file
View File

@ -0,0 +1,930 @@
/* author: stephen crowley, crow@debian.org */
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* STEPHEN CROWLEY, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _MGAREGS_H_
#define _MGAREGS_H_
/*************** (START) AUTOMATICALLY GENERATED REGISTER FILE ***************/
/*
* Generated on Sat Nov 20 21:25:36 CST 1999
*/
/*
* Power Graphic Mode Memory Space Registers
*/
#define AGP_PLL_agp2xpllen_MASK 0xfffffffe /* bit 0 */
#define AGP_PLL_agp2xpllen_disable 0x0
#define AGP_PLL_agp2xpllen_enable 0x1
#define AC_src_MASK 0xfffffff0 /* bits 0-3 */
#define AC_src_zero 0x0 /* val 0, shift 0 */
#define AC_src_one 0x1 /* val 1, shift 0 */
#define AC_src_dst_color 0x2 /* val 2, shift 0 */
#define AC_src_om_dst_color 0x3 /* val 3, shift 0 */
#define AC_src_src_alpha 0x4 /* val 4, shift 0 */
#define AC_src_om_src_alpha 0x5 /* val 5, shift 0 */
#define AC_src_dst_alpha 0x6 /* val 6, shift 0 */
#define AC_src_om_dst_alpha 0x7 /* val 7, shift 0 */
#define AC_src_src_alpha_sat 0x8 /* val 8, shift 0 */
#define AC_dst_MASK 0xffffff0f /* bits 4-7 */
#define AC_dst_zero 0x0 /* val 0, shift 4 */
#define AC_dst_one 0x10 /* val 1, shift 4 */
#define AC_dst_src_color 0x20 /* val 2, shift 4 */
#define AC_dst_om_src_color 0x30 /* val 3, shift 4 */
#define AC_dst_src_alpha 0x40 /* val 4, shift 4 */
#define AC_dst_om_src_alpha 0x50 /* val 5, shift 4 */
#define AC_dst_dst_alpha 0x60 /* val 6, shift 4 */
#define AC_dst_om_dst_alpha 0x70 /* val 7, shift 4 */
#define AC_amode_MASK 0xfffffcff /* bits 8-9 */
#define AC_amode_FCOL 0x0 /* val 0, shift 8 */
#define AC_amode_alpha_channel 0x100 /* val 1, shift 8 */
#define AC_amode_video_alpha 0x200 /* val 2, shift 8 */
#define AC_amode_RSVD 0x300 /* val 3, shift 8 */
#define AC_astipple_MASK 0xfffff7ff /* bit 11 */
#define AC_astipple_disable 0x0
#define AC_astipple_enable 0x800
#define AC_aten_MASK 0xffffefff /* bit 12 */
#define AC_aten_disable 0x0
#define AC_aten_enable 0x1000
#define AC_atmode_MASK 0xffff1fff /* bits 13-15 */
#define AC_atmode_noacmp 0x0 /* val 0, shift 13 */
#define AC_atmode_ae 0x4000 /* val 2, shift 13 */
#define AC_atmode_ane 0x6000 /* val 3, shift 13 */
#define AC_atmode_alt 0x8000 /* val 4, shift 13 */
#define AC_atmode_alte 0xa000 /* val 5, shift 13 */
#define AC_atmode_agt 0xc000 /* val 6, shift 13 */
#define AC_atmode_agte 0xe000 /* val 7, shift 13 */
#define AC_atref_MASK 0xff00ffff /* bits 16-23 */
#define AC_atref_SHIFT 16
#define AC_alphasel_MASK 0xfcffffff /* bits 24-25 */
#define AC_alphasel_fromtex 0x0 /* val 0, shift 24 */
#define AC_alphasel_diffused 0x1000000 /* val 1, shift 24 */
#define AC_alphasel_modulated 0x2000000 /* val 2, shift 24 */
#define AC_alphasel_trans 0x3000000 /* val 3, shift 24 */
#define AR0_ar0_MASK 0xfffc0000 /* bits 0-17 */
#define AR0_ar0_SHIFT 0
#define AR1_ar1_MASK 0xff000000 /* bits 0-23 */
#define AR1_ar1_SHIFT 0
#define AR2_ar2_MASK 0xfffc0000 /* bits 0-17 */
#define AR2_ar2_SHIFT 0
#define AR3_ar3_MASK 0xff000000 /* bits 0-23 */
#define AR3_ar3_SHIFT 0
#define AR3_spage_MASK 0xf8ffffff /* bits 24-26 */
#define AR3_spage_SHIFT 24
#define AR4_ar4_MASK 0xfffc0000 /* bits 0-17 */
#define AR4_ar4_SHIFT 0
#define AR5_ar5_MASK 0xfffc0000 /* bits 0-17 */
#define AR5_ar5_SHIFT 0
#define AR6_ar6_MASK 0xfffc0000 /* bits 0-17 */
#define AR6_ar6_SHIFT 0
#define BC_besen_MASK 0xfffffffe /* bit 0 */
#define BC_besen_disable 0x0
#define BC_besen_enable 0x1
#define BC_besv1srcstp_MASK 0xffffffbf /* bit 6 */
#define BC_besv1srcstp_even 0x0
#define BC_besv1srcstp_odd 0x40
#define BC_besv2srcstp_MASK 0xfffffeff /* bit 8 */
#define BC_besv2srcstp_disable 0x0
#define BC_besv2srcstp_enable 0x100
#define BC_beshfen_MASK 0xfffffbff /* bit 10 */
#define BC_beshfen_disable 0x0
#define BC_beshfen_enable 0x400
#define BC_besvfen_MASK 0xfffff7ff /* bit 11 */
#define BC_besvfen_disable 0x0
#define BC_besvfen_enable 0x800
#define BC_beshfixc_MASK 0xffffefff /* bit 12 */
#define BC_beshfixc_weight 0x0
#define BC_beshfixc_coeff 0x1000
#define BC_bescups_MASK 0xfffeffff /* bit 16 */
#define BC_bescups_disable 0x0
#define BC_bescups_enable 0x10000
#define BC_bes420pl_MASK 0xfffdffff /* bit 17 */
#define BC_bes420pl_422 0x0
#define BC_bes420pl_420 0x20000
#define BC_besdith_MASK 0xfffbffff /* bit 18 */
#define BC_besdith_disable 0x0
#define BC_besdith_enable 0x40000
#define BC_beshmir_MASK 0xfff7ffff /* bit 19 */
#define BC_beshmir_disable 0x0
#define BC_beshmir_enable 0x80000
#define BC_besbwen_MASK 0xffefffff /* bit 20 */
#define BC_besbwen_color 0x0
#define BC_besbwen_bw 0x100000
#define BC_besblank_MASK 0xffdfffff /* bit 21 */
#define BC_besblank_disable 0x0
#define BC_besblank_enable 0x200000
#define BC_besfselm_MASK 0xfeffffff /* bit 24 */
#define BC_besfselm_soft 0x0
#define BC_besfselm_hard 0x1000000
#define BC_besfsel_MASK 0xf9ffffff /* bits 25-26 */
#define BC_besfsel_a1 0x0 /* val 0, shift 25 */
#define BC_besfsel_a2 0x2000000 /* val 1, shift 25 */
#define BC_besfsel_b1 0x4000000 /* val 2, shift 25 */
#define BC_besfsel_b2 0x6000000 /* val 3, shift 25 */
#define BGC_beshzoom_MASK 0xfffffffe /* bit 0 */
#define BGC_beshzoom_disable 0x0
#define BGC_beshzoom_enable 0x1
#define BGC_beshzoomf_MASK 0xfffffffd /* bit 1 */
#define BGC_beshzoomf_disable 0x0
#define BGC_beshzoomf_enable 0x2
#define BGC_bescorder_MASK 0xfffffff7 /* bit 3 */
#define BGC_bescorder_even 0x0
#define BGC_bescorder_odd 0x8
#define BGC_besreghup_MASK 0xffffffef /* bit 4 */
#define BGC_besreghup_disable 0x0
#define BGC_besreghup_enable 0x10
#define BGC_besvcnt_MASK 0xf000ffff /* bits 16-27 */
#define BGC_besvcnt_SHIFT 16
#define BHC_besright_MASK 0xfffff800 /* bits 0-10 */
#define BHC_besright_SHIFT 0
#define BHC_besleft_MASK 0xf800ffff /* bits 16-26 */
#define BHC_besleft_SHIFT 16
#define BHISF_beshiscal_MASK 0xffe00003 /* bits 2-20 */
#define BHISF_beshiscal_SHIFT 2
#define BHSE_beshsrcend_MASK 0xfc000003 /* bits 2-25 */
#define BHSE_beshsrcend_SHIFT 2
#define BHSL_beshsrclst_MASK 0xfc00ffff /* bits 16-25 */
#define BHSL_beshsrclst_SHIFT 16
#define BHSS_beshsrcst_MASK 0xfc000003 /* bits 2-25 */
#define BHSS_beshsrcst_SHIFT 2
#define BP_bespitch_MASK 0xfffff000 /* bits 0-11 */
#define BP_bespitch_SHIFT 0
#define BS_besstat_MASK 0xfffffffc /* bits 0-1 */
#define BS_besstat_a1 0x0 /* val 0, shift 0 */
#define BS_besstat_a2 0x1 /* val 1, shift 0 */
#define BS_besstat_b1 0x2 /* val 2, shift 0 */
#define BS_besstat_b2 0x3 /* val 3, shift 0 */
#define BSF_besv1srclast_MASK 0xfffffc00 /* bits 0-9 */
#define BSF_besv1srclast_SHIFT 0
#define BSF_besv2srclst_MASK 0xfffffc00 /* bits 0-9 */
#define BSF_besv2srclst_SHIFT 0
#define BSF_besv1wght_MASK 0xffff0003 /* bits 2-15 */
#define BSF_besv1wght_SHIFT 2
#define BSF_besv1wghts_MASK 0xfffeffff /* bit 16 */
#define BSF_besv1wghts_disable 0x0
#define BSF_besv1wghts_enable 0x10000
#define BSF_besv2wght_MASK 0xffff0003 /* bits 2-15 */
#define BSF_besv2wght_SHIFT 2
#define BSF_besv2wghts_MASK 0xfffeffff /* bit 16 */
#define BSF_besv2wghts_disable 0x0
#define BSF_besv2wghts_enable 0x10000
#define BVC_besbot_MASK 0xfffff800 /* bits 0-10 */
#define BVC_besbot_SHIFT 0
#define BVC_bestop_MASK 0xf800ffff /* bits 16-26 */
#define BVC_bestop_SHIFT 16
#define BVISF_besviscal_MASK 0xffe00003 /* bits 2-20 */
#define BVISF_besviscal_SHIFT 2
#define CXB_cxleft_MASK 0xfffff000 /* bits 0-11 */
#define CXB_cxleft_SHIFT 0
#define CXB_cxright_MASK 0xf000ffff /* bits 16-27 */
#define CXB_cxright_SHIFT 16
#define DO_dstmap_MASK 0xfffffffe /* bit 0 */
#define DO_dstmap_fb 0x0
#define DO_dstmap_sys 0x1
#define DO_dstacc_MASK 0xfffffffd /* bit 1 */
#define DO_dstacc_pci 0x0
#define DO_dstacc_agp 0x2
#define DO_dstorg_MASK 0x7 /* bits 3-31 */
#define DO_dstorg_SHIFT 3
#define DC_opcod_MASK 0xfffffff0 /* bits 0-3 */
#define DC_opcod_line_open 0x0 /* val 0, shift 0 */
#define DC_opcod_autoline_open 0x1 /* val 1, shift 0 */
#define DC_opcod_line_close 0x2 /* val 2, shift 0 */
#define DC_opcod_autoline_close 0x3 /* val 3, shift 0 */
#define DC_opcod_trap 0x4 /* val 4, shift 0 */
#define DC_opcod_texture_trap 0x6 /* val 6, shift 0 */
#define DC_opcod_bitblt 0x8 /* val 8, shift 0 */
#define DC_opcod_iload 0x9 /* val 9, shift 0 */
#define DC_atype_MASK 0xffffff8f /* bits 4-6 */
#define DC_atype_rpl 0x0 /* val 0, shift 4 */
#define DC_atype_rstr 0x10 /* val 1, shift 4 */
#define DC_atype_zi 0x30 /* val 3, shift 4 */
#define DC_atype_blk 0x40 /* val 4, shift 4 */
#define DC_atype_i 0x70 /* val 7, shift 4 */
#define DC_linear_MASK 0xffffff7f /* bit 7 */
#define DC_linear_xy 0x0
#define DC_linear_linear 0x80
#define DC_zmode_MASK 0xfffff8ff /* bits 8-10 */
#define DC_zmode_nozcmp 0x0 /* val 0, shift 8 */
#define DC_zmode_ze 0x200 /* val 2, shift 8 */
#define DC_zmode_zne 0x300 /* val 3, shift 8 */
#define DC_zmode_zlt 0x400 /* val 4, shift 8 */
#define DC_zmode_zlte 0x500 /* val 5, shift 8 */
#define DC_zmode_zgt 0x600 /* val 6, shift 8 */
#define DC_zmode_zgte 0x700 /* val 7, shift 8 */
#define DC_solid_MASK 0xfffff7ff /* bit 11 */
#define DC_solid_disable 0x0
#define DC_solid_enable 0x800
#define DC_arzero_MASK 0xffffefff /* bit 12 */
#define DC_arzero_disable 0x0
#define DC_arzero_enable 0x1000
#define DC_sgnzero_MASK 0xffffdfff /* bit 13 */
#define DC_sgnzero_disable 0x0
#define DC_sgnzero_enable 0x2000
#define DC_shftzero_MASK 0xffffbfff /* bit 14 */
#define DC_shftzero_disable 0x0
#define DC_shftzero_enable 0x4000
#define DC_bop_MASK 0xfff0ffff /* bits 16-19 */
#define DC_bop_SHIFT 16
#define DC_trans_MASK 0xff0fffff /* bits 20-23 */
#define DC_trans_SHIFT 20
#define DC_bltmod_MASK 0xe1ffffff /* bits 25-28 */
#define DC_bltmod_bmonolef 0x0 /* val 0, shift 25 */
#define DC_bltmod_bmonowf 0x8000000 /* val 4, shift 25 */
#define DC_bltmod_bplan 0x2000000 /* val 1, shift 25 */
#define DC_bltmod_bfcol 0x4000000 /* val 2, shift 25 */
#define DC_bltmod_bu32bgr 0x6000000 /* val 3, shift 25 */
#define DC_bltmod_bu32rgb 0xe000000 /* val 7, shift 25 */
#define DC_bltmod_bu24bgr 0x16000000 /* val 11, shift 25 */
#define DC_bltmod_bu24rgb 0x1e000000 /* val 15, shift 25 */
#define DC_pattern_MASK 0xdfffffff /* bit 29 */
#define DC_pattern_disable 0x0
#define DC_pattern_enable 0x20000000
#define DC_transc_MASK 0xbfffffff /* bit 30 */
#define DC_transc_disable 0x0
#define DC_transc_enable 0x40000000
#define DC_clipdis_MASK 0x7fffffff /* bit 31 */
#define DC_clipdis_disable 0x0
#define DC_clipdis_enable 0x80000000
#define DS_dwgsyncaddr_MASK 0x3 /* bits 2-31 */
#define DS_dwgsyncaddr_SHIFT 2
#define FS_fifocount_MASK 0xffffff80 /* bits 0-6 */
#define FS_fifocount_SHIFT 0
#define FS_bfull_MASK 0xfffffeff /* bit 8 */
#define FS_bfull_disable 0x0
#define FS_bfull_enable 0x100
#define FS_bempty_MASK 0xfffffdff /* bit 9 */
#define FS_bempty_disable 0x0
#define FS_bempty_enable 0x200
#define XA_fxleft_MASK 0xffff0000 /* bits 0-15 */
#define XA_fxleft_SHIFT 0
#define XA_fxright_MASK 0xffff /* bits 16-31 */
#define XA_fxright_SHIFT 16
#define IC_softrapiclr_MASK 0xfffffffe /* bit 0 */
#define IC_softrapiclr_disable 0x0
#define IC_softrapiclr_enable 0x1
#define IC_pickiclr_MASK 0xfffffffb /* bit 2 */
#define IC_pickiclr_disable 0x0
#define IC_pickiclr_enable 0x4
#define IC_vlineiclr_MASK 0xffffffdf /* bit 5 */
#define IC_vlineiclr_disable 0x0
#define IC_vlineiclr_enable 0x20
#define IC_wiclr_MASK 0xffffff7f /* bit 7 */
#define IC_wiclr_disable 0x0
#define IC_wiclr_enable 0x80
#define IC_wciclr_MASK 0xfffffeff /* bit 8 */
#define IC_wciclr_disable 0x0
#define IC_wciclr_enable 0x100
#define IE_softrapien_MASK 0xfffffffe /* bit 0 */
#define IE_softrapien_disable 0x0
#define IE_softrapien_enable 0x1
#define IE_pickien_MASK 0xfffffffb /* bit 2 */
#define IE_pickien_disable 0x0
#define IE_pickien_enable 0x4
#define IE_vlineien_MASK 0xffffffdf /* bit 5 */
#define IE_vlineien_disable 0x0
#define IE_vlineien_enable 0x20
#define IE_extien_MASK 0xffffffbf /* bit 6 */
#define IE_extien_disable 0x0
#define IE_extien_enable 0x40
#define IE_wien_MASK 0xffffff7f /* bit 7 */
#define IE_wien_disable 0x0
#define IE_wien_enable 0x80
#define IE_wcien_MASK 0xfffffeff /* bit 8 */
#define IE_wcien_disable 0x0
#define IE_wcien_enable 0x100
#define MA_pwidth_MASK 0xfffffffc /* bits 0-1 */
#define MA_pwidth_8 0x0 /* val 0, shift 0 */
#define MA_pwidth_16 0x1 /* val 1, shift 0 */
#define MA_pwidth_32 0x2 /* val 2, shift 0 */
#define MA_pwidth_24 0x3 /* val 3, shift 0 */
#define MA_zwidth_MASK 0xffffffe7 /* bits 3-4 */
#define MA_zwidth_16 0x0 /* val 0, shift 3 */
#define MA_zwidth_32 0x8 /* val 1, shift 3 */
#define MA_zwidth_15 0x10 /* val 2, shift 3 */
#define MA_zwidth_24 0x18 /* val 3, shift 3 */
#define MA_memreset_MASK 0xffff7fff /* bit 15 */
#define MA_memreset_disable 0x0
#define MA_memreset_enable 0x8000
#define MA_fogen_MASK 0xfbffffff /* bit 26 */
#define MA_fogen_disable 0x0
#define MA_fogen_enable 0x4000000
#define MA_tlutload_MASK 0xdfffffff /* bit 29 */
#define MA_tlutload_disable 0x0
#define MA_tlutload_enable 0x20000000
#define MA_nodither_MASK 0xbfffffff /* bit 30 */
#define MA_nodither_disable 0x0
#define MA_nodither_enable 0x40000000
#define MA_dit555_MASK 0x7fffffff /* bit 31 */
#define MA_dit555_disable 0x0
#define MA_dit555_enable 0x80000000
#define MCWS_casltncy_MASK 0xfffffff8 /* bits 0-2 */
#define MCWS_casltncy_SHIFT 0
#define MCWS_rrddelay_MASK 0xffffffcf /* bits 4-5 */
#define MCWS_rcddelay_MASK 0xfffffe7f /* bits 7-8 */
#define MCWS_rasmin_MASK 0xffffe3ff /* bits 10-12 */
#define MCWS_rasmin_SHIFT 10
#define MCWS_rpdelay_MASK 0xffff3fff /* bits 14-15 */
#define MCWS_wrdelay_MASK 0xfff3ffff /* bits 18-19 */
#define MCWS_rddelay_MASK 0xffdfffff /* bit 21 */
#define MCWS_rddelay_disable 0x0
#define MCWS_rddelay_enable 0x200000
#define MCWS_smrdelay_MASK 0xfe7fffff /* bits 23-24 */
#define MCWS_bwcdelay_MASK 0xf3ffffff /* bits 26-27 */
#define MCWS_bpldelay_MASK 0x1fffffff /* bits 29-31 */
#define MCWS_bpldelay_SHIFT 29
#define MRB_mclkbrd0_MASK 0xfffffff0 /* bits 0-3 */
#define MRB_mclkbrd0_SHIFT 0
#define MRB_mclkbrd1_MASK 0xfffffe1f /* bits 5-8 */
#define MRB_mclkbrd1_SHIFT 5
#define MRB_strmfctl_MASK 0xff3fffff /* bits 22-23 */
#define MRB_mrsopcod_MASK 0xe1ffffff /* bits 25-28 */
#define MRB_mrsopcod_SHIFT 25
#define OM_dmamod_MASK 0xfffffff3 /* bits 2-3 */
#define OM_dmamod_general 0x0 /* val 0, shift 2 */
#define OM_dmamod_blit 0x4 /* val 1, shift 2 */
#define OM_dmamod_vector 0x8 /* val 2, shift 2 */
#define OM_dmamod_vertex 0xc /* val 3, shift 2 */
#define OM_dmadatasiz_MASK 0xfffffcff /* bits 8-9 */
#define OM_dmadatasiz_8 0x0 /* val 0, shift 8 */
#define OM_dmadatasiz_16 0x100 /* val 1, shift 8 */
#define OM_dmadatasiz_32 0x200 /* val 2, shift 8 */
#define OM_dirdatasiz_MASK 0xfffcffff /* bits 16-17 */
#define OM_dirdatasiz_8 0x0 /* val 0, shift 16 */
#define OM_dirdatasiz_16 0x10000 /* val 1, shift 16 */
#define OM_dirdatasiz_32 0x20000 /* val 2, shift 16 */
#define P_iy_MASK 0xffffe000 /* bits 0-12 */
#define P_iy_SHIFT 0
#define P_ylin_MASK 0xffff7fff /* bit 15 */
#define P_ylin_disable 0x0
#define P_ylin_enable 0x8000
#define PDCA_primod_MASK 0xfffffffc /* bits 0-1 */
#define PDCA_primod_general 0x0 /* val 0, shift 0 */
#define PDCA_primod_blit 0x1 /* val 1, shift 0 */
#define PDCA_primod_vector 0x2 /* val 2, shift 0 */
#define PDCA_primod_vertex 0x3 /* val 3, shift 0 */
#define PDCA_primaddress_MASK 0x3 /* bits 2-31 */
#define PDCA_primaddress_SHIFT 2
#define PDEA_primnostart_MASK 0xfffffffe /* bit 0 */
#define PDEA_primnostart_disable 0x0
#define PDEA_primnostart_enable 0x1
#define PDEA_pagpxfer_MASK 0xfffffffd /* bit 1 */
#define PDEA_pagpxfer_disable 0x0
#define PDEA_pagpxfer_enable 0x2
#define PDEA_primend_MASK 0x3 /* bits 2-31 */
#define PDEA_primend_SHIFT 2
#define PLS_primptren0_MASK 0xfffffffe /* bit 0 */
#define PLS_primptren0_disable 0x0
#define PLS_primptren0_enable 0x1
#define PLS_primptren1_MASK 0xfffffffd /* bit 1 */
#define PLS_primptren1_disable 0x0
#define PLS_primptren1_enable 0x2
#define PLS_primptr_MASK 0x7 /* bits 3-31 */
#define PLS_primptr_SHIFT 3
#define R_softreset_MASK 0xfffffffe /* bit 0 */
#define R_softreset_disable 0x0
#define R_softreset_enable 0x1
#define R_softextrst_MASK 0xfffffffd /* bit 1 */
#define R_softextrst_disable 0x0
#define R_softextrst_enable 0x2
#define SDCA_secmod_MASK 0xfffffffc /* bits 0-1 */
#define SDCA_secmod_general 0x0 /* val 0, shift 0 */
#define SDCA_secmod_blit 0x1 /* val 1, shift 0 */
#define SDCA_secmod_vector 0x2 /* val 2, shift 0 */
#define SDCA_secmod_vertex 0x3 /* val 3, shift 0 */
#define SDCA_secaddress_MASK 0x3 /* bits 2-31 */
#define SDCA_secaddress_SHIFT 2
#define SDEA_sagpxfer_MASK 0xfffffffd /* bit 1 */
#define SDEA_sagpxfer_disable 0x0
#define SDEA_sagpxfer_enable 0x2
#define SDEA_secend_MASK 0x3 /* bits 2-31 */
#define SDEA_secend_SHIFT 2
#define SETDCA_setupmod_MASK 0xfffffffc /* bits 0-1 */
#define SETDCA_setupmod_vertlist 0x0 /* val 0, shift 0 */
#define SETDCA_setupaddress_MASK 0x3 /* bits 2-31 */
#define SETDCA_setupaddress_SHIFT 2
#define SETDEA_setupagpxfer_MASK 0xfffffffd /* bit 1 */
#define SETDEA_setupagpxfer_disable 0x0
#define SETDEA_setupagpxfer_enable 0x2
#define SETDEA_setupend_MASK 0x3 /* bits 2-31 */
#define SETDEA_setupend_SHIFT 2
#define S_sdydxl_MASK 0xfffffffe /* bit 0 */
#define S_sdydxl_y 0x0
#define S_sdydxl_x 0x1
#define S_scanleft_MASK 0xfffffffe /* bit 0 */
#define S_scanleft_disable 0x0
#define S_scanleft_enable 0x1
#define S_sdxl_MASK 0xfffffffd /* bit 1 */
#define S_sdxl_pos 0x0
#define S_sdxl_neg 0x2
#define S_sdy_MASK 0xfffffffb /* bit 2 */
#define S_sdy_pos 0x0
#define S_sdy_neg 0x4
#define S_sdxr_MASK 0xffffffdf /* bit 5 */
#define S_sdxr_pos 0x0
#define S_sdxr_neg 0x20
#define S_brkleft_MASK 0xfffffeff /* bit 8 */
#define S_brkleft_disable 0x0
#define S_brkleft_enable 0x100
#define S_errorinit_MASK 0x7fffffff /* bit 31 */
#define S_errorinit_disable 0x0
#define S_errorinit_enable 0x80000000
#define FSC_x_off_MASK 0xfffffff0 /* bits 0-3 */
#define FSC_x_off_SHIFT 0
#define FSC_funcnt_MASK 0xffffff80 /* bits 0-6 */
#define FSC_funcnt_SHIFT 0
#define FSC_y_off_MASK 0xffffff8f /* bits 4-6 */
#define FSC_y_off_SHIFT 4
#define FSC_funoff_MASK 0xffc0ffff /* bits 16-21 */
#define FSC_funoff_SHIFT 16
#define FSC_stylelen_MASK 0xffc0ffff /* bits 16-21 */
#define FSC_stylelen_SHIFT 16
#define STH_softraphand_MASK 0x3 /* bits 2-31 */
#define STH_softraphand_SHIFT 2
#define SO_srcmap_MASK 0xfffffffe /* bit 0 */
#define SO_srcmap_fb 0x0
#define SO_srcmap_sys 0x1
#define SO_srcacc_MASK 0xfffffffd /* bit 1 */
#define SO_srcacc_pci 0x0
#define SO_srcacc_agp 0x2
#define SO_srcorg_MASK 0x7 /* bits 3-31 */
#define SO_srcorg_SHIFT 3
#define STAT_softrapen_MASK 0xfffffffe /* bit 0 */
#define STAT_softrapen_disable 0x0
#define STAT_softrapen_enable 0x1
#define STAT_pickpen_MASK 0xfffffffb /* bit 2 */
#define STAT_pickpen_disable 0x0
#define STAT_pickpen_enable 0x4
#define STAT_vsyncsts_MASK 0xfffffff7 /* bit 3 */
#define STAT_vsyncsts_disable 0x0
#define STAT_vsyncsts_enable 0x8
#define STAT_vsyncpen_MASK 0xffffffef /* bit 4 */
#define STAT_vsyncpen_disable 0x0
#define STAT_vsyncpen_enable 0x10
#define STAT_vlinepen_MASK 0xffffffdf /* bit 5 */
#define STAT_vlinepen_disable 0x0
#define STAT_vlinepen_enable 0x20
#define STAT_extpen_MASK 0xffffffbf /* bit 6 */
#define STAT_extpen_disable 0x0
#define STAT_extpen_enable 0x40
#define STAT_wpen_MASK 0xffffff7f /* bit 7 */
#define STAT_wpen_disable 0x0
#define STAT_wpen_enable 0x80
#define STAT_wcpen_MASK 0xfffffeff /* bit 8 */
#define STAT_wcpen_disable 0x0
#define STAT_wcpen_enable 0x100
#define STAT_dwgengsts_MASK 0xfffeffff /* bit 16 */
#define STAT_dwgengsts_disable 0x0
#define STAT_dwgengsts_enable 0x10000
#define STAT_endprdmasts_MASK 0xfffdffff /* bit 17 */
#define STAT_endprdmasts_disable 0x0
#define STAT_endprdmasts_enable 0x20000
#define STAT_wbusy_MASK 0xfffbffff /* bit 18 */
#define STAT_wbusy_disable 0x0
#define STAT_wbusy_enable 0x40000
#define STAT_swflag_MASK 0xfffffff /* bits 28-31 */
#define STAT_swflag_SHIFT 28
#define S_sref_MASK 0xffffff00 /* bits 0-7 */
#define S_sref_SHIFT 0
#define S_smsk_MASK 0xffff00ff /* bits 8-15 */
#define S_smsk_SHIFT 8
#define S_swtmsk_MASK 0xff00ffff /* bits 16-23 */
#define S_swtmsk_SHIFT 16
#define SC_smode_MASK 0xfffffff8 /* bits 0-2 */
#define SC_smode_salways 0x0 /* val 0, shift 0 */
#define SC_smode_snever 0x1 /* val 1, shift 0 */
#define SC_smode_se 0x2 /* val 2, shift 0 */
#define SC_smode_sne 0x3 /* val 3, shift 0 */
#define SC_smode_slt 0x4 /* val 4, shift 0 */
#define SC_smode_slte 0x5 /* val 5, shift 0 */
#define SC_smode_sgt 0x6 /* val 6, shift 0 */
#define SC_smode_sgte 0x7 /* val 7, shift 0 */
#define SC_sfailop_MASK 0xffffffc7 /* bits 3-5 */
#define SC_sfailop_keep 0x0 /* val 0, shift 3 */
#define SC_sfailop_zero 0x8 /* val 1, shift 3 */
#define SC_sfailop_replace 0x10 /* val 2, shift 3 */
#define SC_sfailop_incrsat 0x18 /* val 3, shift 3 */
#define SC_sfailop_decrsat 0x20 /* val 4, shift 3 */
#define SC_sfailop_invert 0x28 /* val 5, shift 3 */
#define SC_sfailop_incr 0x30 /* val 6, shift 3 */
#define SC_sfailop_decr 0x38 /* val 7, shift 3 */
#define SC_szfailop_MASK 0xfffffe3f /* bits 6-8 */
#define SC_szfailop_keep 0x0 /* val 0, shift 6 */
#define SC_szfailop_zero 0x40 /* val 1, shift 6 */
#define SC_szfailop_replace 0x80 /* val 2, shift 6 */
#define SC_szfailop_incrsat 0xc0 /* val 3, shift 6 */
#define SC_szfailop_decrsat 0x100 /* val 4, shift 6 */
#define SC_szfailop_invert 0x140 /* val 5, shift 6 */
#define SC_szfailop_incr 0x180 /* val 6, shift 6 */
#define SC_szfailop_decr 0x1c0 /* val 7, shift 6 */
#define SC_szpassop_MASK 0xfffff1ff /* bits 9-11 */
#define SC_szpassop_keep 0x0 /* val 0, shift 9 */
#define SC_szpassop_zero 0x200 /* val 1, shift 9 */
#define SC_szpassop_replace 0x400 /* val 2, shift 9 */
#define SC_szpassop_incrsat 0x600 /* val 3, shift 9 */
#define SC_szpassop_decrsat 0x800 /* val 4, shift 9 */
#define SC_szpassop_invert 0xa00 /* val 5, shift 9 */
#define SC_szpassop_incr 0xc00 /* val 6, shift 9 */
#define SC_szpassop_decr 0xe00 /* val 7, shift 9 */
#define TD1_color1arg2selMASK 0xfffffffc /* bits 0-1 */
#define TD1_color1alphaselMASK 0xffffffe3 /* bits 2-4 */
#define TD1_color1alphaselSHIFT 2
#define TD1_color1arg1alphaMASK 0xffffffdf /* bit 5 */
#define TD1_color1arg1alphadisable 0x0
#define TD1_color1arg1alphaenable 0x20
#define TD1_color1arg1invMASK 0xffffffbf /* bit 6 */
#define TD1_color1arg1invdisable 0x0
#define TD1_color1arg1invenable 0x40
#define TD1_color1arg2alphaMASK 0xffffff7f /* bit 7 */
#define TD1_color1arg2alphadisable 0x0
#define TD1_color1arg2alphaenable 0x80
#define TD1_color1arg2invMASK 0xfffffeff /* bit 8 */
#define TD1_color1arg2invdisable 0x0
#define TD1_color1arg2invenable 0x100
#define TD1_color1alpha1invMASK 0xfffffdff /* bit 9 */
#define TD1_color1alpha1invdisable 0x0
#define TD1_color1alpha1invenable 0x200
#define TD1_color1alpha2invMASK 0xfffffbff /* bit 10 */
#define TD1_color1alpha2invdisable 0x0
#define TD1_color1alpha2invenable 0x400
#define TD1_color1selMASK 0xff9fffff /* bits 21-22 */
#define TD1_color1selarg1 0x0 /* val 0, shift 21 */
#define TD1_color1selarg2 0x200000 /* val 1, shift 21 */
#define TD1_color1seladd 0x400000 /* val 2, shift 21 */
#define TD1_color1selmul 0x600000 /* val 3, shift 21 */
#define TD1_alpha1selMASK 0x3fffffff /* bits 30-31 */
#define TD1_alpha1selarg1 0x0 /* val 0, shift 30 */
#define TD1_alpha1selarg2 0x40000000 /* val 1, shift 30 */
#define TD1_alpha1seladd 0x80000000 /* val 2, shift 30 */
#define TD1_alpha1selmul 0xc0000000 /* val 3, shift 30 */
#define TST_ramtsten_MASK 0xfffffffe /* bit 0 */
#define TST_ramtsten_disable 0x0
#define TST_ramtsten_enable 0x1
#define TST_ramtstdone_MASK 0xfffffffd /* bit 1 */
#define TST_ramtstdone_disable 0x0
#define TST_ramtstdone_enable 0x2
#define TST_wramtstpass_MASK 0xfffffffb /* bit 2 */
#define TST_wramtstpass_disable 0x0
#define TST_wramtstpass_enable 0x4
#define TST_tcachetstpass_MASK 0xfffffff7 /* bit 3 */
#define TST_tcachetstpass_disable 0x0
#define TST_tcachetstpass_enable 0x8
#define TST_tluttstpass_MASK 0xffffffef /* bit 4 */
#define TST_tluttstpass_disable 0x0
#define TST_tluttstpass_enable 0x10
#define TST_luttstpass_MASK 0xffffffdf /* bit 5 */
#define TST_luttstpass_disable 0x0
#define TST_luttstpass_enable 0x20
#define TST_besramtstpass_MASK 0xffffffbf /* bit 6 */
#define TST_besramtstpass_disable 0x0
#define TST_besramtstpass_enable 0x40
#define TST_ringen_MASK 0xfffffeff /* bit 8 */
#define TST_ringen_disable 0x0
#define TST_ringen_enable 0x100
#define TST_apllbyp_MASK 0xfffffdff /* bit 9 */
#define TST_apllbyp_disable 0x0
#define TST_apllbyp_enable 0x200
#define TST_hiten_MASK 0xfffffbff /* bit 10 */
#define TST_hiten_disable 0x0
#define TST_hiten_enable 0x400
#define TST_tmode_MASK 0xffffc7ff /* bits 11-13 */
#define TST_tmode_SHIFT 11
#define TST_tclksel_MASK 0xfffe3fff /* bits 14-16 */
#define TST_tclksel_SHIFT 14
#define TST_ringcnten_MASK 0xfffdffff /* bit 17 */
#define TST_ringcnten_disable 0x0
#define TST_ringcnten_enable 0x20000
#define TST_ringcnt_MASK 0xc003ffff /* bits 18-29 */
#define TST_ringcnt_SHIFT 18
#define TST_ringcntclksl_MASK 0xbfffffff /* bit 30 */
#define TST_ringcntclksl_disable 0x0
#define TST_ringcntclksl_enable 0x40000000
#define TST_biosboot_MASK 0x7fffffff /* bit 31 */
#define TST_biosboot_disable 0x0
#define TST_biosboot_enable 0x80000000
#define TMC_tformat_MASK 0xfffffff0 /* bits 0-3 */
#define TMC_tformat_tw4 0x0 /* val 0, shift 0 */
#define TMC_tformat_tw8 0x1 /* val 1, shift 0 */
#define TMC_tformat_tw15 0x2 /* val 2, shift 0 */
#define TMC_tformat_tw16 0x3 /* val 3, shift 0 */
#define TMC_tformat_tw12 0x4 /* val 4, shift 0 */
#define TMC_tformat_tw32 0x6 /* val 6, shift 0 */
#define TMC_tformat_tw422 0xa /* val 10, shift 0 */
#define TMC_tpitchlin_MASK 0xfffffeff /* bit 8 */
#define TMC_tpitchlin_disable 0x0
#define TMC_tpitchlin_enable 0x100
#define TMC_tpitchext_MASK 0xfff001ff /* bits 9-19 */
#define TMC_tpitchext_SHIFT 9
#define TMC_tpitch_MASK 0xfff8ffff /* bits 16-18 */
#define TMC_tpitch_SHIFT 16
#define TMC_owalpha_MASK 0xffbfffff /* bit 22 */
#define TMC_owalpha_disable 0x0
#define TMC_owalpha_enable 0x400000
#define TMC_azeroextend_MASK 0xff7fffff /* bit 23 */
#define TMC_azeroextend_disable 0x0
#define TMC_azeroextend_enable 0x800000
#define TMC_decalckey_MASK 0xfeffffff /* bit 24 */
#define TMC_decalckey_disable 0x0
#define TMC_decalckey_enable 0x1000000
#define TMC_takey_MASK 0xfdffffff /* bit 25 */
#define TMC_takey_0 0x0
#define TMC_takey_1 0x2000000
#define TMC_tamask_MASK 0xfbffffff /* bit 26 */
#define TMC_tamask_0 0x0
#define TMC_tamask_1 0x4000000
#define TMC_clampv_MASK 0xf7ffffff /* bit 27 */
#define TMC_clampv_disable 0x0
#define TMC_clampv_enable 0x8000000
#define TMC_clampu_MASK 0xefffffff /* bit 28 */
#define TMC_clampu_disable 0x0
#define TMC_clampu_enable 0x10000000
#define TMC_tmodulate_MASK 0xdfffffff /* bit 29 */
#define TMC_tmodulate_disable 0x0
#define TMC_tmodulate_enable 0x20000000
#define TMC_strans_MASK 0xbfffffff /* bit 30 */
#define TMC_strans_disable 0x0
#define TMC_strans_enable 0x40000000
#define TMC_itrans_MASK 0x7fffffff /* bit 31 */
#define TMC_itrans_disable 0x0
#define TMC_itrans_enable 0x80000000
#define TMC_decalblend_MASK 0xfffffffe /* bit 0 */
#define TMC_decalblend_disable 0x0
#define TMC_decalblend_enable 0x1
#define TMC_idecal_MASK 0xfffffffd /* bit 1 */
#define TMC_idecal_disable 0x0
#define TMC_idecal_enable 0x2
#define TMC_decaldis_MASK 0xfffffffb /* bit 2 */
#define TMC_decaldis_disable 0x0
#define TMC_decaldis_enable 0x4
#define TMC_ckstransdis_MASK 0xffffffef /* bit 4 */
#define TMC_ckstransdis_disable 0x0
#define TMC_ckstransdis_enable 0x10
#define TMC_borderen_MASK 0xffffffdf /* bit 5 */
#define TMC_borderen_disable 0x0
#define TMC_borderen_enable 0x20
#define TMC_specen_MASK 0xffffffbf /* bit 6 */
#define TMC_specen_disable 0x0
#define TMC_specen_enable 0x40
#define TF_minfilter_MASK 0xfffffff0 /* bits 0-3 */
#define TF_minfilter_nrst 0x0 /* val 0, shift 0 */
#define TF_minfilter_bilin 0x2 /* val 2, shift 0 */
#define TF_minfilter_cnst 0x3 /* val 3, shift 0 */
#define TF_minfilter_mm1s 0x8 /* val 8, shift 0 */
#define TF_minfilter_mm2s 0x9 /* val 9, shift 0 */
#define TF_minfilter_mm4s 0xa /* val 10, shift 0 */
#define TF_minfilter_mm8s 0xc /* val 12, shift 0 */
#define TF_magfilter_MASK 0xffffff0f /* bits 4-7 */
#define TF_magfilter_nrst 0x0 /* val 0, shift 4 */
#define TF_magfilter_bilin 0x20 /* val 2, shift 4 */
#define TF_magfilter_cnst 0x30 /* val 3, shift 4 */
#define TF_avgstride_MASK 0xfff7ffff /* bit 19 */
#define TF_avgstride_disable 0x0
#define TF_avgstride_enable 0x80000
#define TF_filteralpha_MASK 0xffefffff /* bit 20 */
#define TF_filteralpha_disable 0x0
#define TF_filteralpha_enable 0x100000
#define TF_fthres_MASK 0xe01fffff /* bits 21-28 */
#define TF_fthres_SHIFT 21
#define TF_mapnb_MASK 0x1fffffff /* bits 29-31 */
#define TF_mapnb_SHIFT 29
#define TH_th_MASK 0xffffffc0 /* bits 0-5 */
#define TH_th_SHIFT 0
#define TH_rfh_MASK 0xffff81ff /* bits 9-14 */
#define TH_rfh_SHIFT 9
#define TH_thmask_MASK 0xe003ffff /* bits 18-28 */
#define TH_thmask_SHIFT 18
#define TO_texorgmap_MASK 0xfffffffe /* bit 0 */
#define TO_texorgmap_fb 0x0
#define TO_texorgmap_sys 0x1
#define TO_texorgacc_MASK 0xfffffffd /* bit 1 */
#define TO_texorgacc_pci 0x0
#define TO_texorgacc_agp 0x2
#define TO_texorg_MASK 0x1f /* bits 5-31 */
#define TO_texorg_SHIFT 5
#define TT_tckey_MASK 0xffff0000 /* bits 0-15 */
#define TT_tckey_SHIFT 0
#define TT_tkmask_MASK 0xffff /* bits 16-31 */
#define TT_tkmask_SHIFT 16
#define TT_tckeyh_MASK 0xffff0000 /* bits 0-15 */
#define TT_tckeyh_SHIFT 0
#define TT_tkmaskh_MASK 0xffff /* bits 16-31 */
#define TT_tkmaskh_SHIFT 16
#define TW_tw_MASK 0xffffffc0 /* bits 0-5 */
#define TW_tw_SHIFT 0
#define TW_rfw_MASK 0xffff81ff /* bits 9-14 */
#define TW_rfw_SHIFT 9
#define TW_twmask_MASK 0xe003ffff /* bits 18-28 */
#define TW_twmask_SHIFT 18
#define WAS_seqdst0_MASK 0xffffffc0 /* bits 0-5 */
#define WAS_seqdst0_SHIFT 0
#define WAS_seqdst1_MASK 0xfffff03f /* bits 6-11 */
#define WAS_seqdst1_SHIFT 6
#define WAS_seqdst2_MASK 0xfffc0fff /* bits 12-17 */
#define WAS_seqdst2_SHIFT 12
#define WAS_seqdst3_MASK 0xff03ffff /* bits 18-23 */
#define WAS_seqdst3_SHIFT 18
#define WAS_seqlen_MASK 0xfcffffff /* bits 24-25 */
#define WAS_wfirsttag_MASK 0xfbffffff /* bit 26 */
#define WAS_wfirsttag_disable 0x0
#define WAS_wfirsttag_enable 0x4000000
#define WAS_wsametag_MASK 0xf7ffffff /* bit 27 */
#define WAS_wsametag_disable 0x0
#define WAS_wsametag_enable 0x8000000
#define WAS_seqoff_MASK 0xefffffff /* bit 28 */
#define WAS_seqoff_disable 0x0
#define WAS_seqoff_enable 0x10000000
#define WMA_wcodeaddr_MASK 0xff /* bits 8-31 */
#define WMA_wcodeaddr_SHIFT 8
#define WF_walustsflag_MASK 0xffffff00 /* bits 0-7 */
#define WF_walustsflag_SHIFT 0
#define WF_walucfgflag_MASK 0xffff00ff /* bits 8-15 */
#define WF_walucfgflag_SHIFT 8
#define WF_wprgflag_MASK 0xffff /* bits 16-31 */
#define WF_wprgflag_SHIFT 16
#define WF1_walustsflag1_MASK 0xffffff00 /* bits 0-7 */
#define WF1_walustsflag1_SHIFT 0
#define WF1_walucfgflag1_MASK 0xffff00ff /* bits 8-15 */
#define WF1_walucfgflag1_SHIFT 8
#define WF1_wprgflag1_MASK 0xffff /* bits 16-31 */
#define WF1_wprgflag1_SHIFT 16
#define WGV_wgetmsbmin_MASK 0xffffffe0 /* bits 0-4 */
#define WGV_wgetmsbmin_SHIFT 0
#define WGV_wgetmsbmax_MASK 0xffffe0ff /* bits 8-12 */
#define WGV_wgetmsbmax_SHIFT 8
#define WGV_wbrklefttop_MASK 0xfffeffff /* bit 16 */
#define WGV_wbrklefttop_disable 0x0
#define WGV_wbrklefttop_enable 0x10000
#define WGV_wfastcrop_MASK 0xfffdffff /* bit 17 */
#define WGV_wfastcrop_disable 0x0
#define WGV_wfastcrop_enable 0x20000
#define WGV_wcentersnap_MASK 0xfffbffff /* bit 18 */
#define WGV_wcentersnap_disable 0x0
#define WGV_wcentersnap_enable 0x40000
#define WGV_wbrkrighttop_MASK 0xfff7ffff /* bit 19 */
#define WGV_wbrkrighttop_disable 0x0
#define WGV_wbrkrighttop_enable 0x80000
#define WIA_wmode_MASK 0xfffffffc /* bits 0-1 */
#define WIA_wmode_suspend 0x0 /* val 0, shift 0 */
#define WIA_wmode_resume 0x1 /* val 1, shift 0 */
#define WIA_wmode_jump 0x2 /* val 2, shift 0 */
#define WIA_wmode_start 0x3 /* val 3, shift 0 */
#define WIA_wagp_MASK 0xfffffffb /* bit 2 */
#define WIA_wagp_pci 0x0
#define WIA_wagp_agp 0x4
#define WIA_wiaddr_MASK 0x7 /* bits 3-31 */
#define WIA_wiaddr_SHIFT 3
#define WIA2_wmode_MASK 0xfffffffc /* bits 0-1 */
#define WIA2_wmode_suspend 0x0 /* val 0, shift 0 */
#define WIA2_wmode_resume 0x1 /* val 1, shift 0 */
#define WIA2_wmode_jump 0x2 /* val 2, shift 0 */
#define WIA2_wmode_start 0x3 /* val 3, shift 0 */
#define WIA2_wagp_MASK 0xfffffffb /* bit 2 */
#define WIA2_wagp_pci 0x0
#define WIA2_wagp_agp 0x4
#define WIA2_wiaddr_MASK 0x7 /* bits 3-31 */
#define WIA2_wiaddr_SHIFT 3
#define WIMA_wimemaddr_MASK 0xffffff00 /* bits 0-7 */
#define WIMA_wimemaddr_SHIFT 0
#define WM_wucodecache_MASK 0xfffffffe /* bit 0 */
#define WM_wucodecache_disable 0x0
#define WM_wucodecache_enable 0x1
#define WM_wmaster_MASK 0xfffffffd /* bit 1 */
#define WM_wmaster_disable 0x0
#define WM_wmaster_enable 0x2
#define WM_wcacheflush_MASK 0xfffffff7 /* bit 3 */
#define WM_wcacheflush_disable 0x0
#define WM_wcacheflush_enable 0x8
#define WVS_wvrtxsz_MASK 0xffffffc0 /* bits 0-5 */
#define WVS_wvrtxsz_SHIFT 0
#define WVS_primsz_MASK 0xffffc0ff /* bits 8-13 */
#define WVS_primsz_SHIFT 8
#define XYEA_x_end_MASK 0xffff0000 /* bits 0-15 */
#define XYEA_x_end_SHIFT 0
#define XYEA_y_end_MASK 0xffff /* bits 16-31 */
#define XYEA_y_end_SHIFT 16
#define XYSA_x_start_MASK 0xffff0000 /* bits 0-15 */
#define XYSA_x_start_SHIFT 0
#define XYSA_y_start_MASK 0xffff /* bits 16-31 */
#define XYSA_y_start_SHIFT 16
#define YA_ydst_MASK 0xff800000 /* bits 0-22 */
#define YA_ydst_SHIFT 0
#define YA_sellin_MASK 0x1fffffff /* bits 29-31 */
#define YA_sellin_SHIFT 29
#define YDL_length_MASK 0xffff0000 /* bits 0-15 */
#define YDL_length_SHIFT 0
#define YDL_yval_MASK 0xffff /* bits 16-31 */
#define YDL_yval_SHIFT 16
#define ZO_zorgmap_MASK 0xfffffffe /* bit 0 */
#define ZO_zorgmap_fb 0x0
#define ZO_zorgmap_sys 0x1
#define ZO_zorgacc_MASK 0xfffffffd /* bit 1 */
#define ZO_zorgacc_pci 0x0
#define ZO_zorgacc_agp 0x2
#define ZO_zorg_MASK 0x3 /* bits 2-31 */
#define ZO_zorg_SHIFT 2
/**************** (END) AUTOMATICALLY GENERATED REGISTER FILE *****************/
#endif /* _MGAREGS_H_ */

View File

@ -1,6 +1,6 @@
/* proc.c -- /proc support for DRM -*- linux-c -*-
* Created: Mon Jan 11 09:48:47 1999 by faith@precisioninsight.com
* Revised: Fri Dec 3 09:44:16 1999 by faith@precisioninsight.com
* Revised: Sun Feb 13 23:41:04 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.4 1999/08/20 15:36:46 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.2 1999/12/14 01:33:58 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/proc.c,v 1.5 2000/02/14 06:27:28 martin Exp $
*
*/
@ -79,26 +79,26 @@ int drm_proc_init(drm_device_t *dev)
struct proc_dir_entry *ent;
int i, j;
drm_root = create_proc_entry("graphics", S_IFDIR, NULL);
drm_root = create_proc_entry("dri", S_IFDIR, NULL);
if (!drm_root) {
DRM_ERROR("Cannot create /proc/graphics\n");
DRM_ERROR("Cannot create /proc/dri\n");
return -1;
}
/* Instead of doing this search, we should
add some global support for /proc/graphics. */
add some global support for /proc/dri. */
for (i = 0; i < 8; i++) {
sprintf(drm_slot_name, "graphics/%d", i);
sprintf(drm_slot_name, "dri/%d", i);
drm_dev_root = create_proc_entry(drm_slot_name, S_IFDIR, NULL);
if (!drm_dev_root) {
DRM_ERROR("Cannot create /proc/%s\n", drm_slot_name);
remove_proc_entry("graphics", NULL);
remove_proc_entry("dri", NULL);
}
if (drm_dev_root->nlink == 2) break;
drm_dev_root = NULL;
}
if (!drm_dev_root) {
DRM_ERROR("Cannot find slot in /proc/graphics\n");
DRM_ERROR("Cannot find slot in /proc/dri\n");
return -1;
}
@ -112,7 +112,7 @@ int drm_proc_init(drm_device_t *dev)
remove_proc_entry(drm_proc_list[i].name,
drm_dev_root);
remove_proc_entry(drm_slot_name, NULL);
remove_proc_entry("graphics", NULL);
remove_proc_entry("dri", NULL);
return -1;
}
ent->read_proc = drm_proc_list[i].f;
@ -135,7 +135,7 @@ int drm_proc_cleanup(void)
}
remove_proc_entry(drm_slot_name, NULL);
}
remove_proc_entry("graphics", NULL);
remove_proc_entry("dri", NULL);
remove_proc_entry(DRM_NAME, NULL);
}
drm_root = drm_dev_root = NULL;

View File

@ -1,6 +1,6 @@
/* vm.c -- Memory mapping for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
* Revised: Mon Dec 6 16:54:35 1999 by faith@precisioninsight.com
* Revised: Mon Feb 14 00:16:45 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.7 1999/08/21 02:48:34 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.2 1999/12/14 01:33:58 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.4 2000/02/14 06:27:28 martin Exp $
*
*/

View File

@ -1,6 +1,6 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Mon Dec 6 17:11:19 1999 by faith@precisioninsight.com
* Revised: Mon Feb 14 00:15:23 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.46 1999/08/20 20:00:53 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.2 1999/12/14 01:33:56 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.4 2000/02/14 06:27:26 martin Exp $
*
* Acknowledgements:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.

View File

@ -1,6 +1,6 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Mon Dec 6 17:11:19 1999 by faith@precisioninsight.com
* Revised: Mon Feb 14 00:15:23 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.46 1999/08/20 20:00:53 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.2 1999/12/14 01:33:56 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drm.h,v 1.4 2000/02/14 06:27:26 martin Exp $
*
* Acknowledgements:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.

View File

@ -1,6 +1,6 @@
/* drmstat.c -- DRM device status and testing program
* Created: Tue Jan 5 08:19:24 1999 by faith@precisioninsight.com
* Revised: Mon Dec 6 10:33:46 1999 by faith@precisioninsight.com
* Revised: Sun Feb 13 23:35:00 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.28 1999/08/04 18:12:11 faith Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.2 1999/12/14 01:33:56 robin Exp $
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmstat.c,v 1.5 2000/02/14 06:27:27 martin Exp $
*
*/
@ -218,8 +218,8 @@ int main(int argc, char **argv)
info->list[i].low_mark,
info->list[i].high_mark);
}
printf("===== /proc/graphics/0/mem =====\n");
sprintf(buf, "cat /proc/graphics/0/mem");
printf("===== /proc/dri/0/mem =====\n");
sprintf(buf, "cat /proc/dri/0/mem");
system(buf);
#if 1
if (!(bufs = drmMapBufs(fd))) {
@ -234,8 +234,8 @@ int main(int argc, char **argv)
bufs->list[i].total,
bufs->list[i].address);
}
printf("===== /proc/graphics/0/vma =====\n");
sprintf(buf, "cat /proc/graphics/0/vma");
printf("===== /proc/dri/0/vma =====\n");
sprintf(buf, "cat /proc/dri/0/vma");
system(buf);
#endif
break;
@ -249,8 +249,8 @@ int main(int argc, char **argv)
return 1;
}
printf("0x%08lx:0x%04lx added\n", offset, size);
printf("===== /proc/graphics/0/mem =====\n");
sprintf(buf, "cat /proc/graphics/0/mem");
printf("===== /proc/dri/0/mem =====\n");
sprintf(buf, "cat /proc/dri/0/mem");
system(buf);
break;
case 'r':
@ -266,8 +266,8 @@ int main(int argc, char **argv)
return 1;
}
printf("0x%08lx:0x%04lx added\n", offset, size);
printf("===== /proc/graphics/0/mem =====\n");
sprintf(buf, "cat /proc/graphics/0/mem");
printf("===== /proc/dri/0/mem =====\n");
sprintf(buf, "cat /proc/dri/0/mem");
system(buf);
break;
case 's':
@ -280,7 +280,7 @@ int main(int argc, char **argv)
return 1;
}
printf("0x%04lx byte shm added at 0x%08lx\n", size, handle);
sprintf(buf, "cat /proc/graphics/0/vm");
sprintf(buf, "cat /proc/dri/0/vm");
system(buf);
break;
case 'P':
@ -293,12 +293,12 @@ int main(int argc, char **argv)
}
printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
offset, size, address, getpid());
printf("===== /proc/graphics/0/vma =====\n");
sprintf(buf, "cat /proc/graphics/0/vma");
printf("===== /proc/dri/0/vma =====\n");
sprintf(buf, "cat /proc/dri/0/vma");
system(buf);
mprotect((void *)offset, size, PROT_READ);
printf("===== /proc/graphics/0/vma =====\n");
sprintf(buf, "cat /proc/graphics/0/vma");
printf("===== /proc/dri/0/vma =====\n");
sprintf(buf, "cat /proc/dri/0/vma");
system(buf);
break;
case 'w':
@ -316,10 +316,10 @@ int main(int argc, char **argv)
sprintf(buf, "cat /proc/%d/maps", getpid());
system(buf);
printf("===== /proc/graphics/0/mem =====\n");
sprintf(buf, "cat /proc/graphics/0/mem");
sprintf(buf, "cat /proc/dri/0/mem");
system(buf);
printf("===== /proc/graphics/0/vma =====\n");
sprintf(buf, "cat /proc/graphics/0/vma");
printf("===== /proc/dri/0/vma =====\n");
sprintf(buf, "cat /proc/dri/0/vma");
system(buf);
printf("===== READING =====\n");
for (i = 0; i < 0x10; i++)
@ -336,8 +336,8 @@ int main(int argc, char **argv)
for (i = 0; i < 0x10; i++)
printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
printf("\n");
printf("===== /proc/graphics/0/vma =====\n");
sprintf(buf, "cat /proc/graphics/0/vma");
printf("===== /proc/dri/0/vma =====\n");
sprintf(buf, "cat /proc/dri/0/vma");
system(buf);
break;
case 'L':