From a1780925fb461c736bae7e51de0d3a1e909548f2 Mon Sep 17 00:00:00 2001 From: Keith Whitwell Date: Tue, 22 Apr 2003 11:31:55 +0000 Subject: [PATCH] Move a chunk of gamma-specific code out of drm_dma.h. Remove unused 'DRM_FLAG_NOCTX' option. --- linux-core/drmP.h | 10 -- linux-core/drm_context.c | 9 +- linux-core/drm_dma.c | 270 ----------------------------------- linux-core/drm_init.c | 5 - linux/drmP.h | 10 -- linux/drm_context.h | 9 +- linux/drm_dma.h | 270 ----------------------------------- linux/drm_init.h | 5 - linux/gamma_context.h | 11 +- linux/gamma_drv.c | 1 + linux/gamma_drv.h | 10 ++ linux/gamma_old_dma.h | 300 +++++++++++++++++++++++++++++++++++++++ 12 files changed, 326 insertions(+), 584 deletions(-) create mode 100644 linux/gamma_old_dma.h diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 338345be..c530ca0c 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -127,7 +127,6 @@ #define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */ #define DRM_FLAG_DEBUG 0x01 -#define DRM_FLAG_NOCTX 0x02 #define DRM_MEM_DMA 0 #define DRM_MEM_SAREA 1 @@ -814,15 +813,6 @@ extern int DRM(dma_setup)(drm_device_t *dev); extern void DRM(dma_takedown)(drm_device_t *dev); extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf); extern void DRM(reclaim_buffers)( struct file *filp ); -#if __HAVE_OLD_DMA -/* GH: This is a dirty hack for now... - */ -extern void DRM(clear_next_buffer)(drm_device_t *dev); -extern int DRM(select_queue)(drm_device_t *dev, - void (*wrapper)(unsigned long)); -extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma); -extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma); -#endif #if __HAVE_DMA_IRQ extern int DRM(control)( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ); diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 646b2236..b446956c 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -36,6 +36,11 @@ #define __NO_VERSION__ #include "drmP.h" +#if !__HAVE_CTX_BITMAP +#error "__HAVE_CTX_BITMAP must be defined" +#endif + + /* ================================================================ * Context bitmap support */ @@ -235,10 +240,6 @@ int DRM(context_switch)( drm_device_t *dev, int old, int new ) return 0; } - if ( DRM(flags) & DRM_FLAG_NOCTX ) { - DRM(context_switch_complete)( dev, new ); - } - return 0; } diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index 45c4d321..640e245d 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -180,276 +180,6 @@ void DRM(reclaim_buffers)( struct file *filp ) #endif -/* GH: This is a big hack for now... - */ -#if __HAVE_OLD_DMA - -void DRM(clear_next_buffer)(drm_device_t *dev) -{ - drm_device_dma_t *dma = dev->dma; - - dma->next_buffer = NULL; - if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) { - wake_up_interruptible(&dma->next_queue->flush_queue); - } - dma->next_queue = NULL; -} - -int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long)) -{ - int i; - int candidate = -1; - int j = jiffies; - - if (!dev) { - DRM_ERROR("No device\n"); - return -1; - } - if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) { - /* This only happens between the time the - interrupt is initialized and the time - the queues are initialized. */ - return -1; - } - - /* Doing "while locked" DMA? 
*/ - if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) { - return DRM_KERNEL_CONTEXT; - } - - /* If there are buffers on the last_context - queue, and we have not been executing - this context very long, continue to - execute this context. */ - if (dev->last_switch <= j - && dev->last_switch + DRM_TIME_SLICE > j - && DRM_WAITCOUNT(dev, dev->last_context)) { - return dev->last_context; - } - - /* Otherwise, find a candidate */ - for (i = dev->last_checked + 1; i < dev->queue_count; i++) { - if (DRM_WAITCOUNT(dev, i)) { - candidate = dev->last_checked = i; - break; - } - } - - if (candidate < 0) { - for (i = 0; i < dev->queue_count; i++) { - if (DRM_WAITCOUNT(dev, i)) { - candidate = dev->last_checked = i; - break; - } - } - } - - if (wrapper - && candidate >= 0 - && candidate != dev->last_context - && dev->last_switch <= j - && dev->last_switch + DRM_TIME_SLICE > j) { - if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) { - del_timer(&dev->timer); - dev->timer.function = wrapper; - dev->timer.data = (unsigned long)dev; - dev->timer.expires = dev->last_switch+DRM_TIME_SLICE; - add_timer(&dev->timer); - } - return -1; - } - - return candidate; -} - - -int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - int i; - drm_queue_t *q; - drm_buf_t *buf; - int idx; - int while_locked = 0; - drm_device_dma_t *dma = dev->dma; - DECLARE_WAITQUEUE(entry, current); - - DRM_DEBUG("%d\n", d->send_count); - - if (d->flags & _DRM_DMA_WHILE_LOCKED) { - int context = dev->lock.hw_lock->lock; - - if (!_DRM_LOCK_IS_HELD(context)) { - DRM_ERROR("No lock held during \"while locked\"" - " request\n"); - return -EINVAL; - } - if (d->context != _DRM_LOCKING_CONTEXT(context) - && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) { - DRM_ERROR("Lock held by %d while %d makes" - " \"while locked\" request\n", - _DRM_LOCKING_CONTEXT(context), - d->context); - return -EINVAL; - } - q = dev->queuelist[DRM_KERNEL_CONTEXT]; - while_locked = 1; - } else { - q = dev->queuelist[d->context]; - } - - - atomic_inc(&q->use_count); - if (atomic_read(&q->block_write)) { - add_wait_queue(&q->write_queue, &entry); - atomic_inc(&q->block_count); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - if (!atomic_read(&q->block_write)) break; - schedule(); - if (signal_pending(current)) { - atomic_dec(&q->use_count); - remove_wait_queue(&q->write_queue, &entry); - return -EINTR; - } - } - atomic_dec(&q->block_count); - current->state = TASK_RUNNING; - remove_wait_queue(&q->write_queue, &entry); - } - - for (i = 0; i < d->send_count; i++) { - idx = d->send_indices[i]; - if (idx < 0 || idx >= dma->buf_count) { - atomic_dec(&q->use_count); - DRM_ERROR("Index %d (of %d max)\n", - d->send_indices[i], dma->buf_count - 1); - return -EINVAL; - } - buf = dma->buflist[ idx ]; - if (buf->filp != filp) { - atomic_dec(&q->use_count); - DRM_ERROR("Process %d using buffer not owned\n", - current->pid); - return -EINVAL; - } - if (buf->list != DRM_LIST_NONE) { - atomic_dec(&q->use_count); - DRM_ERROR("Process %d using buffer %d on list %d\n", - current->pid, buf->idx, buf->list); - } - buf->used = d->send_sizes[i]; - buf->while_locked = while_locked; - buf->context = d->context; - if (!buf->used) { - DRM_ERROR("Queueing 0 length buffer\n"); - } - if (buf->pending) { - atomic_dec(&q->use_count); - DRM_ERROR("Queueing pending buffer:" - " buffer %d, offset %d\n", - d->send_indices[i], i); - return -EINVAL; - } - if (buf->waiting) { - atomic_dec(&q->use_count); - 
DRM_ERROR("Queueing waiting buffer:" - " buffer %d, offset %d\n", - d->send_indices[i], i); - return -EINVAL; - } - buf->waiting = 1; - if (atomic_read(&q->use_count) == 1 - || atomic_read(&q->finalization)) { - DRM(free_buffer)(dev, buf); - } else { - DRM(waitlist_put)(&q->waitlist, buf); - atomic_inc(&q->total_queued); - } - } - atomic_dec(&q->use_count); - - return 0; -} - -static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d, - int order) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - int i; - drm_buf_t *buf; - drm_device_dma_t *dma = dev->dma; - - for (i = d->granted_count; i < d->request_count; i++) { - buf = DRM(freelist_get)(&dma->bufs[order].freelist, - d->flags & _DRM_DMA_WAIT); - if (!buf) break; - if (buf->pending || buf->waiting) { - DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n", - buf->idx, - buf->filp, - buf->waiting, - buf->pending); - } - buf->filp = filp; - if (copy_to_user(&d->request_indices[i], - &buf->idx, - sizeof(buf->idx))) - return -EFAULT; - - if (copy_to_user(&d->request_sizes[i], - &buf->total, - sizeof(buf->total))) - return -EFAULT; - - ++d->granted_count; - } - return 0; -} - - -int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma) -{ - int order; - int retcode = 0; - int tmp_order; - - order = DRM(order)(dma->request_size); - - dma->granted_count = 0; - retcode = DRM(dma_get_buffers_of_order)(filp, dma, order); - - if (dma->granted_count < dma->request_count - && (dma->flags & _DRM_DMA_SMALLER_OK)) { - for (tmp_order = order - 1; - !retcode - && dma->granted_count < dma->request_count - && tmp_order >= DRM_MIN_ORDER; - --tmp_order) { - - retcode = DRM(dma_get_buffers_of_order)(filp, dma, - tmp_order); - } - } - - if (dma->granted_count < dma->request_count - && (dma->flags & _DRM_DMA_LARGER_OK)) { - for (tmp_order = order + 1; - !retcode - && dma->granted_count < dma->request_count - && tmp_order <= DRM_MAX_ORDER; - ++tmp_order) { - - retcode = DRM(dma_get_buffers_of_order)(filp, dma, - tmp_order); - } - } - return 0; -} - -#endif /* __HAVE_OLD_DMA */ #if __HAVE_DMA_IRQ diff --git a/linux-core/drm_init.c b/linux-core/drm_init.c index 2d6b6a3c..3aa4ad59 100644 --- a/linux-core/drm_init.c +++ b/linux-core/drm_init.c @@ -50,11 +50,6 @@ static void DRM(parse_option)(char *s) for (c = s; *c && *c != ':'; c++); /* find : or \0 */ if (*c) r = c + 1; else r = NULL; /* remember remainder */ *c = '\0'; /* terminate */ - if (!strcmp(s, "noctx")) { - DRM(flags) |= DRM_FLAG_NOCTX; - DRM_INFO("Server-mediated context switching OFF\n"); - return; - } if (!strcmp(s, "debug")) { DRM(flags) |= DRM_FLAG_DEBUG; DRM_INFO("Debug messages ON\n"); diff --git a/linux/drmP.h b/linux/drmP.h index 338345be..c530ca0c 100644 --- a/linux/drmP.h +++ b/linux/drmP.h @@ -127,7 +127,6 @@ #define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */ #define DRM_FLAG_DEBUG 0x01 -#define DRM_FLAG_NOCTX 0x02 #define DRM_MEM_DMA 0 #define DRM_MEM_SAREA 1 @@ -814,15 +813,6 @@ extern int DRM(dma_setup)(drm_device_t *dev); extern void DRM(dma_takedown)(drm_device_t *dev); extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf); extern void DRM(reclaim_buffers)( struct file *filp ); -#if __HAVE_OLD_DMA -/* GH: This is a dirty hack for now... 
- */ -extern void DRM(clear_next_buffer)(drm_device_t *dev); -extern int DRM(select_queue)(drm_device_t *dev, - void (*wrapper)(unsigned long)); -extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma); -extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma); -#endif #if __HAVE_DMA_IRQ extern int DRM(control)( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ); diff --git a/linux/drm_context.h b/linux/drm_context.h index 646b2236..b446956c 100644 --- a/linux/drm_context.h +++ b/linux/drm_context.h @@ -36,6 +36,11 @@ #define __NO_VERSION__ #include "drmP.h" +#if !__HAVE_CTX_BITMAP +#error "__HAVE_CTX_BITMAP must be defined" +#endif + + /* ================================================================ * Context bitmap support */ @@ -235,10 +240,6 @@ int DRM(context_switch)( drm_device_t *dev, int old, int new ) return 0; } - if ( DRM(flags) & DRM_FLAG_NOCTX ) { - DRM(context_switch_complete)( dev, new ); - } - return 0; } diff --git a/linux/drm_dma.h b/linux/drm_dma.h index 45c4d321..640e245d 100644 --- a/linux/drm_dma.h +++ b/linux/drm_dma.h @@ -180,276 +180,6 @@ void DRM(reclaim_buffers)( struct file *filp ) #endif -/* GH: This is a big hack for now... - */ -#if __HAVE_OLD_DMA - -void DRM(clear_next_buffer)(drm_device_t *dev) -{ - drm_device_dma_t *dma = dev->dma; - - dma->next_buffer = NULL; - if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) { - wake_up_interruptible(&dma->next_queue->flush_queue); - } - dma->next_queue = NULL; -} - -int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long)) -{ - int i; - int candidate = -1; - int j = jiffies; - - if (!dev) { - DRM_ERROR("No device\n"); - return -1; - } - if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) { - /* This only happens between the time the - interrupt is initialized and the time - the queues are initialized. */ - return -1; - } - - /* Doing "while locked" DMA? */ - if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) { - return DRM_KERNEL_CONTEXT; - } - - /* If there are buffers on the last_context - queue, and we have not been executing - this context very long, continue to - execute this context. 
*/ - if (dev->last_switch <= j - && dev->last_switch + DRM_TIME_SLICE > j - && DRM_WAITCOUNT(dev, dev->last_context)) { - return dev->last_context; - } - - /* Otherwise, find a candidate */ - for (i = dev->last_checked + 1; i < dev->queue_count; i++) { - if (DRM_WAITCOUNT(dev, i)) { - candidate = dev->last_checked = i; - break; - } - } - - if (candidate < 0) { - for (i = 0; i < dev->queue_count; i++) { - if (DRM_WAITCOUNT(dev, i)) { - candidate = dev->last_checked = i; - break; - } - } - } - - if (wrapper - && candidate >= 0 - && candidate != dev->last_context - && dev->last_switch <= j - && dev->last_switch + DRM_TIME_SLICE > j) { - if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) { - del_timer(&dev->timer); - dev->timer.function = wrapper; - dev->timer.data = (unsigned long)dev; - dev->timer.expires = dev->last_switch+DRM_TIME_SLICE; - add_timer(&dev->timer); - } - return -1; - } - - return candidate; -} - - -int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - int i; - drm_queue_t *q; - drm_buf_t *buf; - int idx; - int while_locked = 0; - drm_device_dma_t *dma = dev->dma; - DECLARE_WAITQUEUE(entry, current); - - DRM_DEBUG("%d\n", d->send_count); - - if (d->flags & _DRM_DMA_WHILE_LOCKED) { - int context = dev->lock.hw_lock->lock; - - if (!_DRM_LOCK_IS_HELD(context)) { - DRM_ERROR("No lock held during \"while locked\"" - " request\n"); - return -EINVAL; - } - if (d->context != _DRM_LOCKING_CONTEXT(context) - && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) { - DRM_ERROR("Lock held by %d while %d makes" - " \"while locked\" request\n", - _DRM_LOCKING_CONTEXT(context), - d->context); - return -EINVAL; - } - q = dev->queuelist[DRM_KERNEL_CONTEXT]; - while_locked = 1; - } else { - q = dev->queuelist[d->context]; - } - - - atomic_inc(&q->use_count); - if (atomic_read(&q->block_write)) { - add_wait_queue(&q->write_queue, &entry); - atomic_inc(&q->block_count); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - if (!atomic_read(&q->block_write)) break; - schedule(); - if (signal_pending(current)) { - atomic_dec(&q->use_count); - remove_wait_queue(&q->write_queue, &entry); - return -EINTR; - } - } - atomic_dec(&q->block_count); - current->state = TASK_RUNNING; - remove_wait_queue(&q->write_queue, &entry); - } - - for (i = 0; i < d->send_count; i++) { - idx = d->send_indices[i]; - if (idx < 0 || idx >= dma->buf_count) { - atomic_dec(&q->use_count); - DRM_ERROR("Index %d (of %d max)\n", - d->send_indices[i], dma->buf_count - 1); - return -EINVAL; - } - buf = dma->buflist[ idx ]; - if (buf->filp != filp) { - atomic_dec(&q->use_count); - DRM_ERROR("Process %d using buffer not owned\n", - current->pid); - return -EINVAL; - } - if (buf->list != DRM_LIST_NONE) { - atomic_dec(&q->use_count); - DRM_ERROR("Process %d using buffer %d on list %d\n", - current->pid, buf->idx, buf->list); - } - buf->used = d->send_sizes[i]; - buf->while_locked = while_locked; - buf->context = d->context; - if (!buf->used) { - DRM_ERROR("Queueing 0 length buffer\n"); - } - if (buf->pending) { - atomic_dec(&q->use_count); - DRM_ERROR("Queueing pending buffer:" - " buffer %d, offset %d\n", - d->send_indices[i], i); - return -EINVAL; - } - if (buf->waiting) { - atomic_dec(&q->use_count); - DRM_ERROR("Queueing waiting buffer:" - " buffer %d, offset %d\n", - d->send_indices[i], i); - return -EINVAL; - } - buf->waiting = 1; - if (atomic_read(&q->use_count) == 1 - || atomic_read(&q->finalization)) { - DRM(free_buffer)(dev, buf); - } 
else { - DRM(waitlist_put)(&q->waitlist, buf); - atomic_inc(&q->total_queued); - } - } - atomic_dec(&q->use_count); - - return 0; -} - -static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d, - int order) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - int i; - drm_buf_t *buf; - drm_device_dma_t *dma = dev->dma; - - for (i = d->granted_count; i < d->request_count; i++) { - buf = DRM(freelist_get)(&dma->bufs[order].freelist, - d->flags & _DRM_DMA_WAIT); - if (!buf) break; - if (buf->pending || buf->waiting) { - DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n", - buf->idx, - buf->filp, - buf->waiting, - buf->pending); - } - buf->filp = filp; - if (copy_to_user(&d->request_indices[i], - &buf->idx, - sizeof(buf->idx))) - return -EFAULT; - - if (copy_to_user(&d->request_sizes[i], - &buf->total, - sizeof(buf->total))) - return -EFAULT; - - ++d->granted_count; - } - return 0; -} - - -int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma) -{ - int order; - int retcode = 0; - int tmp_order; - - order = DRM(order)(dma->request_size); - - dma->granted_count = 0; - retcode = DRM(dma_get_buffers_of_order)(filp, dma, order); - - if (dma->granted_count < dma->request_count - && (dma->flags & _DRM_DMA_SMALLER_OK)) { - for (tmp_order = order - 1; - !retcode - && dma->granted_count < dma->request_count - && tmp_order >= DRM_MIN_ORDER; - --tmp_order) { - - retcode = DRM(dma_get_buffers_of_order)(filp, dma, - tmp_order); - } - } - - if (dma->granted_count < dma->request_count - && (dma->flags & _DRM_DMA_LARGER_OK)) { - for (tmp_order = order + 1; - !retcode - && dma->granted_count < dma->request_count - && tmp_order <= DRM_MAX_ORDER; - ++tmp_order) { - - retcode = DRM(dma_get_buffers_of_order)(filp, dma, - tmp_order); - } - } - return 0; -} - -#endif /* __HAVE_OLD_DMA */ #if __HAVE_DMA_IRQ diff --git a/linux/drm_init.h b/linux/drm_init.h index 2d6b6a3c..3aa4ad59 100644 --- a/linux/drm_init.h +++ b/linux/drm_init.h @@ -50,11 +50,6 @@ static void DRM(parse_option)(char *s) for (c = s; *c && *c != ':'; c++); /* find : or \0 */ if (*c) r = c + 1; else r = NULL; /* remember remainder */ *c = '\0'; /* terminate */ - if (!strcmp(s, "noctx")) { - DRM(flags) |= DRM_FLAG_NOCTX; - DRM_INFO("Server-mediated context switching OFF\n"); - return; - } if (!strcmp(s, "debug")) { DRM(flags) |= DRM_FLAG_DEBUG; DRM_INFO("Debug messages ON\n"); diff --git a/linux/gamma_context.h b/linux/gamma_context.h index f5d2993c..9e4b913f 100644 --- a/linux/gamma_context.h +++ b/linux/gamma_context.h @@ -173,12 +173,11 @@ int DRM(context_switch)(drm_device_t *dev, int old, int new) return -EINVAL; } - if (DRM(flags) & DRM_FLAG_NOCTX) { - DRM(context_switch_complete)(dev, new); - } else { - sprintf(buf, "C %d %d\n", old, new); - DRM(write_string)(dev, buf); - } + /* This causes the X server to wake up & do a bunch of hardware + * interaction to actually effect the context switch. + */ + sprintf(buf, "C %d %d\n", old, new); + DRM(write_string)(dev, buf); atomic_dec(&q->use_count); diff --git a/linux/gamma_drv.c b/linux/gamma_drv.c index b08d04bf..52b8983f 100644 --- a/linux/gamma_drv.c +++ b/linux/gamma_drv.c @@ -41,6 +41,7 @@ #include "drm_bufs.h" #include "gamma_context.h" /* NOTE! 
*/ #include "drm_dma.h" +#include "gamma_old_dma.h" #include "drm_drawable.h" #include "drm_drv.h" diff --git a/linux/gamma_drv.h b/linux/gamma_drv.h index 2f7d3588..2509ffbb 100644 --- a/linux/gamma_drv.h +++ b/linux/gamma_drv.h @@ -59,6 +59,16 @@ extern int gamma_dma(struct inode *inode, struct file *filp, extern int gamma_find_devices(void); extern int gamma_found(void); +/* Gamma-specific code pulled from drm_dma.h: + */ +extern void DRM(clear_next_buffer)(drm_device_t *dev); +extern int DRM(select_queue)(drm_device_t *dev, + void (*wrapper)(unsigned long)); +extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma); +extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma); + + + #define GLINT_DRI_BUF_COUNT 256 #define GAMMA_OFF(reg) \ diff --git a/linux/gamma_old_dma.h b/linux/gamma_old_dma.h new file mode 100644 index 00000000..43be5c80 --- /dev/null +++ b/linux/gamma_old_dma.h @@ -0,0 +1,300 @@ +/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*- + * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. (Rik) Faith + * Gareth Hughes + */ + + +/* Gamma-specific code pulled from drm_dma.h: + */ + +void DRM(clear_next_buffer)(drm_device_t *dev) +{ + drm_device_dma_t *dma = dev->dma; + + dma->next_buffer = NULL; + if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) { + wake_up_interruptible(&dma->next_queue->flush_queue); + } + dma->next_queue = NULL; +} + +int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long)) +{ + int i; + int candidate = -1; + int j = jiffies; + + if (!dev) { + DRM_ERROR("No device\n"); + return -1; + } + if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) { + /* This only happens between the time the + interrupt is initialized and the time + the queues are initialized. */ + return -1; + } + + /* Doing "while locked" DMA? */ + if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) { + return DRM_KERNEL_CONTEXT; + } + + /* If there are buffers on the last_context + queue, and we have not been executing + this context very long, continue to + execute this context. 
*/ + if (dev->last_switch <= j + && dev->last_switch + DRM_TIME_SLICE > j + && DRM_WAITCOUNT(dev, dev->last_context)) { + return dev->last_context; + } + + /* Otherwise, find a candidate */ + for (i = dev->last_checked + 1; i < dev->queue_count; i++) { + if (DRM_WAITCOUNT(dev, i)) { + candidate = dev->last_checked = i; + break; + } + } + + if (candidate < 0) { + for (i = 0; i < dev->queue_count; i++) { + if (DRM_WAITCOUNT(dev, i)) { + candidate = dev->last_checked = i; + break; + } + } + } + + if (wrapper + && candidate >= 0 + && candidate != dev->last_context + && dev->last_switch <= j + && dev->last_switch + DRM_TIME_SLICE > j) { + if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) { + del_timer(&dev->timer); + dev->timer.function = wrapper; + dev->timer.data = (unsigned long)dev; + dev->timer.expires = dev->last_switch+DRM_TIME_SLICE; + add_timer(&dev->timer); + } + return -1; + } + + return candidate; +} + + +int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int i; + drm_queue_t *q; + drm_buf_t *buf; + int idx; + int while_locked = 0; + drm_device_dma_t *dma = dev->dma; + DECLARE_WAITQUEUE(entry, current); + + DRM_DEBUG("%d\n", d->send_count); + + if (d->flags & _DRM_DMA_WHILE_LOCKED) { + int context = dev->lock.hw_lock->lock; + + if (!_DRM_LOCK_IS_HELD(context)) { + DRM_ERROR("No lock held during \"while locked\"" + " request\n"); + return -EINVAL; + } + if (d->context != _DRM_LOCKING_CONTEXT(context) + && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) { + DRM_ERROR("Lock held by %d while %d makes" + " \"while locked\" request\n", + _DRM_LOCKING_CONTEXT(context), + d->context); + return -EINVAL; + } + q = dev->queuelist[DRM_KERNEL_CONTEXT]; + while_locked = 1; + } else { + q = dev->queuelist[d->context]; + } + + + atomic_inc(&q->use_count); + if (atomic_read(&q->block_write)) { + add_wait_queue(&q->write_queue, &entry); + atomic_inc(&q->block_count); + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (!atomic_read(&q->block_write)) break; + schedule(); + if (signal_pending(current)) { + atomic_dec(&q->use_count); + remove_wait_queue(&q->write_queue, &entry); + return -EINTR; + } + } + atomic_dec(&q->block_count); + current->state = TASK_RUNNING; + remove_wait_queue(&q->write_queue, &entry); + } + + for (i = 0; i < d->send_count; i++) { + idx = d->send_indices[i]; + if (idx < 0 || idx >= dma->buf_count) { + atomic_dec(&q->use_count); + DRM_ERROR("Index %d (of %d max)\n", + d->send_indices[i], dma->buf_count - 1); + return -EINVAL; + } + buf = dma->buflist[ idx ]; + if (buf->filp != filp) { + atomic_dec(&q->use_count); + DRM_ERROR("Process %d using buffer not owned\n", + current->pid); + return -EINVAL; + } + if (buf->list != DRM_LIST_NONE) { + atomic_dec(&q->use_count); + DRM_ERROR("Process %d using buffer %d on list %d\n", + current->pid, buf->idx, buf->list); + } + buf->used = d->send_sizes[i]; + buf->while_locked = while_locked; + buf->context = d->context; + if (!buf->used) { + DRM_ERROR("Queueing 0 length buffer\n"); + } + if (buf->pending) { + atomic_dec(&q->use_count); + DRM_ERROR("Queueing pending buffer:" + " buffer %d, offset %d\n", + d->send_indices[i], i); + return -EINVAL; + } + if (buf->waiting) { + atomic_dec(&q->use_count); + DRM_ERROR("Queueing waiting buffer:" + " buffer %d, offset %d\n", + d->send_indices[i], i); + return -EINVAL; + } + buf->waiting = 1; + if (atomic_read(&q->use_count) == 1 + || atomic_read(&q->finalization)) { + DRM(free_buffer)(dev, buf); + } 
else { + DRM(waitlist_put)(&q->waitlist, buf); + atomic_inc(&q->total_queued); + } + } + atomic_dec(&q->use_count); + + return 0; +} + +static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d, + int order) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int i; + drm_buf_t *buf; + drm_device_dma_t *dma = dev->dma; + + for (i = d->granted_count; i < d->request_count; i++) { + buf = DRM(freelist_get)(&dma->bufs[order].freelist, + d->flags & _DRM_DMA_WAIT); + if (!buf) break; + if (buf->pending || buf->waiting) { + DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n", + buf->idx, + buf->filp, + buf->waiting, + buf->pending); + } + buf->filp = filp; + if (copy_to_user(&d->request_indices[i], + &buf->idx, + sizeof(buf->idx))) + return -EFAULT; + + if (copy_to_user(&d->request_sizes[i], + &buf->total, + sizeof(buf->total))) + return -EFAULT; + + ++d->granted_count; + } + return 0; +} + + +int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma) +{ + int order; + int retcode = 0; + int tmp_order; + + order = DRM(order)(dma->request_size); + + dma->granted_count = 0; + retcode = DRM(dma_get_buffers_of_order)(filp, dma, order); + + if (dma->granted_count < dma->request_count + && (dma->flags & _DRM_DMA_SMALLER_OK)) { + for (tmp_order = order - 1; + !retcode + && dma->granted_count < dma->request_count + && tmp_order >= DRM_MIN_ORDER; + --tmp_order) { + + retcode = DRM(dma_get_buffers_of_order)(filp, dma, + tmp_order); + } + } + + if (dma->granted_count < dma->request_count + && (dma->flags & _DRM_DMA_LARGER_OK)) { + for (tmp_order = order + 1; + !retcode + && dma->granted_count < dma->request_count + && tmp_order <= DRM_MAX_ORDER; + ++tmp_order) { + + retcode = DRM(dma_get_buffers_of_order)(filp, dma, + tmp_order); + } + } + return 0; +} +
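
For context, the relocation works because of the DRM() name-pasting macro used by the old template build: each driver defines DRM(x) to paste its own prefix (gamma_ for this driver), so DRM(dma_enqueue) and friends already compile to gamma-local symbols. Moving their definitions from the shared drm_dma.h template into the gamma-only gamma_old_dma.h therefore changes which translation units build them, not their linker-visible names. The sketch below is illustrative only; the macro definition and the simplified function signature are assumptions, not taken from this diff.

/* Standalone sketch of the DRM() name pasting -- not part of the patch.
 * The per-driver macro (assumed form shown here) turns template code
 * such as DRM(clear_next_buffer) into a driver-local symbol, which is
 * why the old-DMA functions can move into a gamma-only header without
 * any rename.  Signatures are simplified for the demo.
 */
#include <stdio.h>

#define DRM(x) gamma_##x        /* assumed per-driver definition */

static void DRM(clear_next_buffer)(void)   /* expands to gamma_clear_next_buffer */
{
        printf("gamma-private symbol\n");
}

int main(void)
{
        gamma_clear_next_buffer();          /* same function, expanded name */
        return 0;
}

Building and running the sketch prints once from gamma_clear_next_buffer(), the same symbol name the template headers produce when gamma_old_dma.h is included from gamma_drv.c as in the hunk above.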