Remove more gamma DMA code. This isn't all of it, but it's a major portion.
parent
c6d2af70cb
commit
01178567eb
|
@ -476,10 +476,6 @@ extern int DRM(lock_transfer)(drm_device_t *dev,
|
||||||
extern int DRM(lock_free)(drm_device_t *dev,
|
extern int DRM(lock_free)(drm_device_t *dev,
|
||||||
__volatile__ unsigned int *lock,
|
__volatile__ unsigned int *lock,
|
||||||
unsigned int context);
|
unsigned int context);
|
||||||
extern int DRM(flush_unblock)(drm_device_t *dev, int context,
|
|
||||||
drm_lock_flags_t flags);
|
|
||||||
extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
|
|
||||||
drm_lock_flags_t flags);
|
|
||||||
|
|
||||||
/* Buffer management support (drm_bufs.h) */
|
/* Buffer management support (drm_bufs.h) */
|
||||||
extern int DRM(order)( unsigned long size );
|
extern int DRM(order)( unsigned long size );
|
||||||
|
@ -490,15 +486,6 @@ extern int DRM(dma_setup)(drm_device_t *dev);
|
||||||
extern void DRM(dma_takedown)(drm_device_t *dev);
|
extern void DRM(dma_takedown)(drm_device_t *dev);
|
||||||
extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
|
extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
|
||||||
extern void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp);
|
extern void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp);
|
||||||
#if __HAVE_OLD_DMA
|
|
||||||
/* GH: This is a dirty hack for now...
|
|
||||||
*/
|
|
||||||
extern void DRM(clear_next_buffer)(drm_device_t *dev);
|
|
||||||
extern int DRM(select_queue)(drm_device_t *dev,
|
|
||||||
void (*wrapper)(unsigned long));
|
|
||||||
extern int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *dma);
|
|
||||||
extern int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma);
|
|
||||||
#endif
|
|
||||||
#if __HAVE_DMA_IRQ
|
#if __HAVE_DMA_IRQ
|
||||||
extern int DRM(irq_install)( drm_device_t *dev, int irq );
|
extern int DRM(irq_install)( drm_device_t *dev, int irq );
|
||||||
extern int DRM(irq_uninstall)( drm_device_t *dev );
|
extern int DRM(irq_uninstall)( drm_device_t *dev );
|
||||||
|
@ -568,6 +555,7 @@ extern int DRM(setunique)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(getmap)(DRM_IOCTL_ARGS);
|
extern int DRM(getmap)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(getclient)(DRM_IOCTL_ARGS);
|
extern int DRM(getclient)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(getstats)(DRM_IOCTL_ARGS);
|
extern int DRM(getstats)(DRM_IOCTL_ARGS);
|
||||||
|
extern int DRM(noop)(DRM_IOCTL_ARGS);
|
||||||
|
|
||||||
/* Context IOCTL support (drm_context.h) */
|
/* Context IOCTL support (drm_context.h) */
|
||||||
extern int DRM(resctx)(DRM_IOCTL_ARGS);
|
extern int DRM(resctx)(DRM_IOCTL_ARGS);
|
||||||
|
@ -588,11 +576,6 @@ extern int DRM(rmdraw)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(getmagic)(DRM_IOCTL_ARGS);
|
extern int DRM(getmagic)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(authmagic)(DRM_IOCTL_ARGS);
|
extern int DRM(authmagic)(DRM_IOCTL_ARGS);
|
||||||
|
|
||||||
/* Locking IOCTL support (drm_lock.h) */
|
|
||||||
extern int DRM(block)(DRM_IOCTL_ARGS);
|
|
||||||
extern int DRM(unblock)(DRM_IOCTL_ARGS);
|
|
||||||
extern int DRM(finish)(DRM_IOCTL_ARGS);
|
|
||||||
|
|
||||||
/* Buffer management support (drm_bufs.h) */
|
/* Buffer management support (drm_bufs.h) */
|
||||||
extern int DRM(addmap)(DRM_IOCTL_ARGS);
|
extern int DRM(addmap)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(rmmap)(DRM_IOCTL_ARGS);
|
extern int DRM(rmmap)(DRM_IOCTL_ARGS);
|
||||||
|
|
|
@ -32,7 +32,9 @@
|
||||||
|
|
||||||
#include "drmP.h"
|
#include "drmP.h"
|
||||||
|
|
||||||
#if __HAVE_CTX_BITMAP
|
#if !__HAVE_CTX_BITMAP
|
||||||
|
#error "__HAVE_CTX_BITMAP must be defined"
|
||||||
|
#endif
|
||||||
|
|
||||||
/* ================================================================
|
/* ================================================================
|
||||||
* Context bitmap support
|
* Context bitmap support
|
||||||
|
@ -345,347 +347,3 @@ int DRM(rmctx)( DRM_IOCTL_ARGS )
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
#else /* __HAVE_CTX_BITMAP */
|
|
||||||
|
|
||||||
/* ================================================================
|
|
||||||
* Old-style context support
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
int DRM(context_switch)(drm_device_t *dev, int old, int new)
|
|
||||||
{
|
|
||||||
drm_queue_t *q;
|
|
||||||
|
|
||||||
#if 0
|
|
||||||
atomic_inc(&dev->total_ctx);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if (test_and_set_bit(0, &dev->context_flag)) {
|
|
||||||
DRM_ERROR("Reentering -- FIXME\n");
|
|
||||||
return DRM_ERR(EBUSY);
|
|
||||||
}
|
|
||||||
|
|
||||||
DRM_DEBUG("Context switch from %d to %d\n", old, new);
|
|
||||||
|
|
||||||
if (new >= dev->queue_count) {
|
|
||||||
clear_bit(0, &dev->context_flag);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (new == dev->last_context) {
|
|
||||||
clear_bit(0, &dev->context_flag);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
q = dev->queuelist[new];
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) == 1) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
clear_bit(0, &dev->context_flag);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(context_switch_complete)(drm_device_t *dev, int new)
|
|
||||||
{
|
|
||||||
drm_device_dma_t *dma = dev->dma;
|
|
||||||
|
|
||||||
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
|
|
||||||
dev->last_switch = jiffies;
|
|
||||||
|
|
||||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
|
||||||
DRM_ERROR("Lock isn't held after context switch\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
|
|
||||||
if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
|
|
||||||
DRM_KERNEL_CONTEXT)) {
|
|
||||||
DRM_ERROR("Cannot free lock\n");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
clear_bit(0, &dev->context_flag);
|
|
||||||
DRM_WAKEUP_INT(&dev->context_wait);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
|
|
||||||
{
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
if (atomic_read(&q->use_count) != 1
|
|
||||||
|| atomic_read(&q->finalization)
|
|
||||||
|| atomic_read(&q->block_count)) {
|
|
||||||
DRM_ERROR("New queue is already in use: u%ld f%ld b%ld\n",
|
|
||||||
(unsigned long)atomic_read(&q->use_count),
|
|
||||||
(unsigned long)atomic_read(&q->finalization),
|
|
||||||
(unsigned long)atomic_read(&q->block_count));
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic_set(&q->finalization, 0);
|
|
||||||
atomic_set(&q->block_count, 0);
|
|
||||||
atomic_set(&q->block_read, 0);
|
|
||||||
atomic_set(&q->block_write, 0);
|
|
||||||
atomic_set(&q->total_queued, 0);
|
|
||||||
atomic_set(&q->total_flushed, 0);
|
|
||||||
atomic_set(&q->total_locks, 0);
|
|
||||||
|
|
||||||
q->write_queue = 0;
|
|
||||||
q->read_queue = 0;
|
|
||||||
q->flush_queue = 0;
|
|
||||||
|
|
||||||
q->flags = ctx->flags;
|
|
||||||
|
|
||||||
DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/* drm_alloc_queue:
|
|
||||||
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
|
|
||||||
disappear (so all deallocation must be done after IOCTLs are off)
|
|
||||||
2) dev->queue_count < dev->queue_slots
|
|
||||||
3) dev->queuelist[i].use_count == 0 and
|
|
||||||
dev->queuelist[i].finalization == 0 if i not in use
|
|
||||||
POST: 1) dev->queuelist[i].use_count == 1
|
|
||||||
2) dev->queue_count < dev->queue_slots */
|
|
||||||
|
|
||||||
static int DRM(alloc_queue)(drm_device_t *dev)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
drm_queue_t *queue;
|
|
||||||
int oldslots;
|
|
||||||
int newslots;
|
|
||||||
/* Check for a free queue */
|
|
||||||
for (i = 0; i < dev->queue_count; i++) {
|
|
||||||
atomic_inc(&dev->queuelist[i]->use_count);
|
|
||||||
if (atomic_read(&dev->queuelist[i]->use_count) == 1
|
|
||||||
&& !atomic_read(&dev->queuelist[i]->finalization)) {
|
|
||||||
DRM_DEBUG("%d (free)\n", i);
|
|
||||||
return i;
|
|
||||||
}
|
|
||||||
atomic_dec(&dev->queuelist[i]->use_count);
|
|
||||||
}
|
|
||||||
/* Allocate a new queue */
|
|
||||||
DRM_LOCK;
|
|
||||||
|
|
||||||
queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES);
|
|
||||||
memset(queue, 0, sizeof(*queue));
|
|
||||||
atomic_set(&queue->use_count, 1);
|
|
||||||
|
|
||||||
++dev->queue_count;
|
|
||||||
if (dev->queue_count >= dev->queue_slots) {
|
|
||||||
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
|
|
||||||
if (!dev->queue_slots) dev->queue_slots = 1;
|
|
||||||
dev->queue_slots *= 2;
|
|
||||||
newslots = dev->queue_slots * sizeof(*dev->queuelist);
|
|
||||||
|
|
||||||
dev->queuelist = DRM(realloc)(dev->queuelist,
|
|
||||||
oldslots,
|
|
||||||
newslots,
|
|
||||||
DRM_MEM_QUEUES);
|
|
||||||
if (!dev->queuelist) {
|
|
||||||
DRM_UNLOCK;
|
|
||||||
DRM_DEBUG("out of memory\n");
|
|
||||||
return DRM_ERR(ENOMEM);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dev->queuelist[dev->queue_count-1] = queue;
|
|
||||||
|
|
||||||
DRM_UNLOCK;
|
|
||||||
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
|
|
||||||
return dev->queue_count - 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(resctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
drm_ctx_res_t res;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( res, (drm_ctx_res_t *)data, sizeof(res) );
|
|
||||||
|
|
||||||
if (res.count >= DRM_RESERVED_CONTEXTS) {
|
|
||||||
memset(&ctx, 0, sizeof(ctx));
|
|
||||||
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
|
|
||||||
ctx.handle = i;
|
|
||||||
if (DRM_COPY_TO_USER(&res.contexts[i],
|
|
||||||
&i,
|
|
||||||
sizeof(i)))
|
|
||||||
return DRM_ERR(EFAULT);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
res.count = DRM_RESERVED_CONTEXTS;
|
|
||||||
|
|
||||||
DRM_COPY_TO_USER_IOCTL( (drm_ctx_res_t *)data, res, sizeof(res) );
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(addctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
|
|
||||||
/* Init kernel's context and get a new one. */
|
|
||||||
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
|
|
||||||
ctx.handle = DRM(alloc_queue)(dev);
|
|
||||||
}
|
|
||||||
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
|
|
||||||
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(modctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
drm_queue_t *q;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
|
|
||||||
if (ctx.handle < 0 || ctx.handle >= dev->queue_count)
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
q = dev->queuelist[ctx.handle];
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) == 1) {
|
|
||||||
/* No longer in use */
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (DRM_BUFCOUNT(&q->waitlist)) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return DRM_ERR(EBUSY);
|
|
||||||
}
|
|
||||||
|
|
||||||
q->flags = ctx.flags;
|
|
||||||
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(getctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
drm_queue_t *q;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
|
|
||||||
if (ctx.handle >= dev->queue_count)
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
q = dev->queuelist[ctx.handle];
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) == 1) {
|
|
||||||
/* No longer in use */
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.flags = q->flags;
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
|
|
||||||
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(switchctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(newctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
DRM(context_switch_complete)(dev, ctx.handle);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(rmctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
drm_queue_t *q;
|
|
||||||
drm_buf_t *buf;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
|
|
||||||
if (ctx.handle >= dev->queue_count) return DRM_ERR(EINVAL);
|
|
||||||
q = dev->queuelist[ctx.handle];
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) == 1) {
|
|
||||||
/* No longer in use */
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic_inc(&q->finalization); /* Mark queue in finalization state */
|
|
||||||
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
|
|
||||||
finalization) */
|
|
||||||
|
|
||||||
while (test_and_set_bit(0, &dev->interrupt_flag)) {
|
|
||||||
static int never;
|
|
||||||
int retcode;
|
|
||||||
retcode = tsleep(&never, PZERO|PCATCH, "never", 1);
|
|
||||||
if (retcode)
|
|
||||||
return retcode;
|
|
||||||
}
|
|
||||||
/* Remove queued buffers */
|
|
||||||
while ((buf = DRM(waitlist_get)(&q->waitlist))) {
|
|
||||||
DRM(free_buffer)(dev, buf);
|
|
||||||
}
|
|
||||||
clear_bit(0, &dev->interrupt_flag);
|
|
||||||
|
|
||||||
/* Wakeup blocked processes */
|
|
||||||
wakeup( &q->block_read );
|
|
||||||
wakeup( &q->block_write );
|
|
||||||
DRM_WAKEUP_INT( &q->flush_queue );
|
|
||||||
/* Finalization over. Queue is made
|
|
||||||
available when both use_count and
|
|
||||||
finalization become 0, which won't
|
|
||||||
happen until all the waiting processes
|
|
||||||
stop waiting. */
|
|
||||||
atomic_dec(&q->finalization);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* __HAVE_CTX_BITMAP */
|
|
||||||
|
|
|
@ -171,271 +171,6 @@ void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
||||||
/* GH: This is a big hack for now...
|
|
||||||
*/
|
|
||||||
#if __HAVE_OLD_DMA
|
|
||||||
|
|
||||||
void DRM(clear_next_buffer)(drm_device_t *dev)
|
|
||||||
{
|
|
||||||
drm_device_dma_t *dma = dev->dma;
|
|
||||||
|
|
||||||
dma->next_buffer = NULL;
|
|
||||||
if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
|
|
||||||
DRM_WAKEUP_INT(&dma->next_queue->flush_queue);
|
|
||||||
}
|
|
||||||
dma->next_queue = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
int candidate = -1;
|
|
||||||
int j = jiffies;
|
|
||||||
|
|
||||||
if (!dev) {
|
|
||||||
DRM_ERROR("No device\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
|
|
||||||
/* This only happens between the time the
|
|
||||||
interrupt is initialized and the time
|
|
||||||
the queues are initialized. */
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Doing "while locked" DMA? */
|
|
||||||
if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
|
|
||||||
return DRM_KERNEL_CONTEXT;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If there are buffers on the last_context
|
|
||||||
queue, and we have not been executing
|
|
||||||
this context very long, continue to
|
|
||||||
execute this context. */
|
|
||||||
if (dev->last_switch <= j
|
|
||||||
&& dev->last_switch + DRM_TIME_SLICE > j
|
|
||||||
&& DRM_WAITCOUNT(dev, dev->last_context)) {
|
|
||||||
return dev->last_context;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Otherwise, find a candidate */
|
|
||||||
for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
|
|
||||||
if (DRM_WAITCOUNT(dev, i)) {
|
|
||||||
candidate = dev->last_checked = i;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (candidate < 0) {
|
|
||||||
for (i = 0; i < dev->queue_count; i++) {
|
|
||||||
if (DRM_WAITCOUNT(dev, i)) {
|
|
||||||
candidate = dev->last_checked = i;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (wrapper
|
|
||||||
&& candidate >= 0
|
|
||||||
&& candidate != dev->last_context
|
|
||||||
&& dev->last_switch <= j
|
|
||||||
&& dev->last_switch + DRM_TIME_SLICE > j) {
|
|
||||||
int s = splclock();
|
|
||||||
if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
|
|
||||||
callout_reset(&dev->timer,
|
|
||||||
dev->last_switch + DRM_TIME_SLICE - j,
|
|
||||||
(void (*)(void *))wrapper,
|
|
||||||
dev);
|
|
||||||
}
|
|
||||||
splx(s);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return candidate;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
drm_queue_t *q;
|
|
||||||
drm_buf_t *buf;
|
|
||||||
int idx;
|
|
||||||
int while_locked = 0;
|
|
||||||
drm_device_dma_t *dma = dev->dma;
|
|
||||||
int error;
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", d->send_count);
|
|
||||||
|
|
||||||
if (d->flags & _DRM_DMA_WHILE_LOCKED) {
|
|
||||||
int context = dev->lock.hw_lock->lock;
|
|
||||||
|
|
||||||
if (!_DRM_LOCK_IS_HELD(context)) {
|
|
||||||
DRM_ERROR("No lock held during \"while locked\""
|
|
||||||
" request\n");
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
if (d->context != _DRM_LOCKING_CONTEXT(context)
|
|
||||||
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
|
|
||||||
DRM_ERROR("Lock held by %d while %d makes"
|
|
||||||
" \"while locked\" request\n",
|
|
||||||
_DRM_LOCKING_CONTEXT(context),
|
|
||||||
d->context);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
q = dev->queuelist[DRM_KERNEL_CONTEXT];
|
|
||||||
while_locked = 1;
|
|
||||||
} else {
|
|
||||||
q = dev->queuelist[d->context];
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->block_write)) {
|
|
||||||
atomic_inc(&q->block_count);
|
|
||||||
for (;;) {
|
|
||||||
if (!atomic_read(&q->block_write)) break;
|
|
||||||
error = tsleep(&q->block_write, PZERO|PCATCH,
|
|
||||||
"dmawr", 0);
|
|
||||||
if (error) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return error;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
atomic_dec(&q->block_count);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < d->send_count; i++) {
|
|
||||||
idx = d->send_indices[i];
|
|
||||||
if (idx < 0 || idx >= dma->buf_count) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Index %d (of %d max)\n",
|
|
||||||
d->send_indices[i], dma->buf_count - 1);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
buf = dma->buflist[ idx ];
|
|
||||||
if (buf->pid != DRM_CURRENTPID) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Process %d using buffer owned by %d\n",
|
|
||||||
DRM_CURRENTPID, buf->pid);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
if (buf->list != DRM_LIST_NONE) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Process %d using buffer %d on list %d\n",
|
|
||||||
DRM_CURRENTPID, buf->idx, buf->list);
|
|
||||||
}
|
|
||||||
buf->used = d->send_sizes[i];
|
|
||||||
buf->while_locked = while_locked;
|
|
||||||
buf->context = d->context;
|
|
||||||
if (!buf->used) {
|
|
||||||
DRM_ERROR("Queueing 0 length buffer\n");
|
|
||||||
}
|
|
||||||
if (buf->pending) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Queueing pending buffer:"
|
|
||||||
" buffer %d, offset %d\n",
|
|
||||||
d->send_indices[i], i);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
if (buf->waiting) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Queueing waiting buffer:"
|
|
||||||
" buffer %d, offset %d\n",
|
|
||||||
d->send_indices[i], i);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
buf->waiting = 1;
|
|
||||||
if (atomic_read(&q->use_count) == 1
|
|
||||||
|| atomic_read(&q->finalization)) {
|
|
||||||
DRM(free_buffer)(dev, buf);
|
|
||||||
} else {
|
|
||||||
DRM(waitlist_put)(&q->waitlist, buf);
|
|
||||||
atomic_inc(&q->total_queued);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
|
|
||||||
int order)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
drm_buf_t *buf;
|
|
||||||
drm_device_dma_t *dma = dev->dma;
|
|
||||||
|
|
||||||
for (i = d->granted_count; i < d->request_count; i++) {
|
|
||||||
buf = DRM(freelist_get)(&dma->bufs[order].freelist,
|
|
||||||
d->flags & _DRM_DMA_WAIT);
|
|
||||||
if (!buf) break;
|
|
||||||
if (buf->pending || buf->waiting) {
|
|
||||||
DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
|
|
||||||
buf->idx,
|
|
||||||
buf->pid,
|
|
||||||
buf->waiting,
|
|
||||||
buf->pending);
|
|
||||||
}
|
|
||||||
buf->pid = DRM_CURRENTPID;
|
|
||||||
if (DRM_COPY_TO_USER(&d->request_indices[i],
|
|
||||||
&buf->idx,
|
|
||||||
sizeof(buf->idx)))
|
|
||||||
return DRM_ERR(EFAULT);
|
|
||||||
|
|
||||||
if (DRM_COPY_TO_USER(&d->request_sizes[i],
|
|
||||||
&buf->total,
|
|
||||||
sizeof(buf->total)))
|
|
||||||
return DRM_ERR(EFAULT);
|
|
||||||
|
|
||||||
++d->granted_count;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
|
|
||||||
{
|
|
||||||
int order;
|
|
||||||
int retcode = 0;
|
|
||||||
int tmp_order;
|
|
||||||
|
|
||||||
order = DRM(order)(dma->request_size);
|
|
||||||
|
|
||||||
dma->granted_count = 0;
|
|
||||||
retcode = DRM(dma_get_buffers_of_order)(dev, dma, order);
|
|
||||||
|
|
||||||
if (dma->granted_count < dma->request_count
|
|
||||||
&& (dma->flags & _DRM_DMA_SMALLER_OK)) {
|
|
||||||
for (tmp_order = order - 1;
|
|
||||||
!retcode
|
|
||||||
&& dma->granted_count < dma->request_count
|
|
||||||
&& tmp_order >= DRM_MIN_ORDER;
|
|
||||||
--tmp_order) {
|
|
||||||
|
|
||||||
retcode = DRM(dma_get_buffers_of_order)(dev, dma,
|
|
||||||
tmp_order);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (dma->granted_count < dma->request_count
|
|
||||||
&& (dma->flags & _DRM_DMA_LARGER_OK)) {
|
|
||||||
for (tmp_order = order + 1;
|
|
||||||
!retcode
|
|
||||||
&& dma->granted_count < dma->request_count
|
|
||||||
&& tmp_order <= DRM_MAX_ORDER;
|
|
||||||
++tmp_order) {
|
|
||||||
|
|
||||||
retcode = DRM(dma_get_buffers_of_order)(dev, dma,
|
|
||||||
tmp_order);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* __HAVE_OLD_DMA */
|
|
||||||
|
|
||||||
|
|
||||||
#if __HAVE_DMA_IRQ
|
#if __HAVE_DMA_IRQ
|
||||||
|
|
||||||
int DRM(irq_install)( drm_device_t *dev, int irq )
|
int DRM(irq_install)( drm_device_t *dev, int irq )
|
||||||
|
|
|
@ -68,12 +68,6 @@
|
||||||
#ifndef __HAVE_DMA_SCHEDULE
|
#ifndef __HAVE_DMA_SCHEDULE
|
||||||
#define __HAVE_DMA_SCHEDULE 0
|
#define __HAVE_DMA_SCHEDULE 0
|
||||||
#endif
|
#endif
|
||||||
#ifndef __HAVE_DMA_FLUSH
|
|
||||||
#define __HAVE_DMA_FLUSH 0
|
|
||||||
#endif
|
|
||||||
#ifndef __HAVE_DMA_READY
|
|
||||||
#define __HAVE_DMA_READY 0
|
|
||||||
#endif
|
|
||||||
#ifndef __HAVE_DMA_QUIESCENT
|
#ifndef __HAVE_DMA_QUIESCENT
|
||||||
#define __HAVE_DMA_QUIESCENT 0
|
#define __HAVE_DMA_QUIESCENT 0
|
||||||
#endif
|
#endif
|
||||||
|
@ -150,8 +144,8 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
|
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
|
||||||
|
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
|
||||||
|
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
|
||||||
|
@ -175,7 +169,7 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
|
||||||
|
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
|
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
|
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
|
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
|
||||||
|
|
||||||
#if __HAVE_DMA
|
#if __HAVE_DMA
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
|
||||||
|
@ -514,12 +508,6 @@ static int DRM(setup)( drm_device_t *dev )
|
||||||
|
|
||||||
DRM_DEBUG( "\n" );
|
DRM_DEBUG( "\n" );
|
||||||
|
|
||||||
/* The kernel's context could be created here, but is now created
|
|
||||||
* in drm_dma_enqueue. This is more resource-efficient for
|
|
||||||
* hardware that does not do DMA, but may mean that
|
|
||||||
* drm_select_queue fails between the time the interrupt is
|
|
||||||
* initialized and the time the queues are initialized.
|
|
||||||
*/
|
|
||||||
DRIVER_POSTSETUP();
|
DRIVER_POSTSETUP();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -1112,9 +1100,6 @@ int DRM(lock)( DRM_IOCTL_ARGS )
|
||||||
q = dev->queuelist[lock.context];
|
q = dev->queuelist[lock.context];
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if __HAVE_DMA_FLUSH
|
|
||||||
ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
|
|
||||||
#endif
|
|
||||||
if ( !ret ) {
|
if ( !ret ) {
|
||||||
for (;;) {
|
for (;;) {
|
||||||
if ( !dev->lock.hw_lock ) {
|
if ( !dev->lock.hw_lock ) {
|
||||||
|
@ -1140,18 +1125,9 @@ int DRM(lock)( DRM_IOCTL_ARGS )
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if __HAVE_DMA_FLUSH
|
|
||||||
DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if ( !ret ) {
|
if ( !ret ) {
|
||||||
/* FIXME: Add signal blocking here */
|
/* FIXME: Add signal blocking here */
|
||||||
|
|
||||||
#if __HAVE_DMA_READY
|
|
||||||
if ( lock.flags & _DRM_LOCK_READY ) {
|
|
||||||
DRIVER_DMA_READY();
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
#if __HAVE_DMA_QUIESCENT
|
#if __HAVE_DMA_QUIESCENT
|
||||||
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
|
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
|
||||||
DRIVER_DMA_QUIESCENT();
|
DRIVER_DMA_QUIESCENT();
|
||||||
|
|
|
@ -230,3 +230,9 @@ int DRM(getstats)( DRM_IOCTL_ARGS )
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int DRM(noop)(DRM_IOCTL_ARGS)
|
||||||
|
{
|
||||||
|
DRM_DEBUG("\n");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
|
@ -33,18 +33,6 @@
|
||||||
|
|
||||||
#include "drmP.h"
|
#include "drmP.h"
|
||||||
|
|
||||||
int DRM(block)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(unblock)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context)
|
int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context)
|
||||||
{
|
{
|
||||||
unsigned int old, new;
|
unsigned int old, new;
|
||||||
|
@ -107,98 +95,3 @@ int DRM(lock_free)(drm_device_t *dev,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int DRM(flush_queue)(drm_device_t *dev, int context)
|
|
||||||
{
|
|
||||||
int error;
|
|
||||||
int ret = 0;
|
|
||||||
drm_queue_t *q = dev->queuelist[context];
|
|
||||||
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) > 1) {
|
|
||||||
atomic_inc(&q->block_write);
|
|
||||||
atomic_inc(&q->block_count);
|
|
||||||
error = tsleep((void *)&q->flush_queue, PZERO|PCATCH, "drmfq", 0);
|
|
||||||
if (error)
|
|
||||||
return error;
|
|
||||||
atomic_dec(&q->block_count);
|
|
||||||
}
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
|
|
||||||
/* NOTE: block_write is still incremented!
|
|
||||||
Use drm_flush_unlock_queue to decrement. */
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
|
|
||||||
{
|
|
||||||
drm_queue_t *q = dev->queuelist[context];
|
|
||||||
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) > 1) {
|
|
||||||
if (atomic_read(&q->block_write)) {
|
|
||||||
atomic_dec(&q->block_write);
|
|
||||||
DRM_WAKEUP_INT((void *)&q->write_queue);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
|
|
||||||
drm_lock_flags_t flags)
|
|
||||||
{
|
|
||||||
int ret = 0;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
if (flags & _DRM_LOCK_FLUSH) {
|
|
||||||
ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
|
|
||||||
if (!ret) ret = DRM(flush_queue)(dev, context);
|
|
||||||
}
|
|
||||||
if (flags & _DRM_LOCK_FLUSH_ALL) {
|
|
||||||
for (i = 0; !ret && i < dev->queue_count; i++) {
|
|
||||||
ret = DRM(flush_queue)(dev, i);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
|
|
||||||
{
|
|
||||||
int ret = 0;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
if (flags & _DRM_LOCK_FLUSH) {
|
|
||||||
ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
|
|
||||||
if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
|
|
||||||
}
|
|
||||||
if (flags & _DRM_LOCK_FLUSH_ALL) {
|
|
||||||
for (i = 0; !ret && i < dev->queue_count; i++) {
|
|
||||||
ret = DRM(flush_unblock_queue)(dev, i);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(finish)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
int ret = 0;
|
|
||||||
drm_lock_t lock;
|
|
||||||
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
|
|
||||||
|
|
||||||
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
|
|
||||||
DRM(flush_unblock)(dev, lock.context, lock.flags);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
19
bsd/drmP.h
19
bsd/drmP.h
|
@ -476,10 +476,6 @@ extern int DRM(lock_transfer)(drm_device_t *dev,
|
||||||
extern int DRM(lock_free)(drm_device_t *dev,
|
extern int DRM(lock_free)(drm_device_t *dev,
|
||||||
__volatile__ unsigned int *lock,
|
__volatile__ unsigned int *lock,
|
||||||
unsigned int context);
|
unsigned int context);
|
||||||
extern int DRM(flush_unblock)(drm_device_t *dev, int context,
|
|
||||||
drm_lock_flags_t flags);
|
|
||||||
extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
|
|
||||||
drm_lock_flags_t flags);
|
|
||||||
|
|
||||||
/* Buffer management support (drm_bufs.h) */
|
/* Buffer management support (drm_bufs.h) */
|
||||||
extern int DRM(order)( unsigned long size );
|
extern int DRM(order)( unsigned long size );
|
||||||
|
@ -490,15 +486,6 @@ extern int DRM(dma_setup)(drm_device_t *dev);
|
||||||
extern void DRM(dma_takedown)(drm_device_t *dev);
|
extern void DRM(dma_takedown)(drm_device_t *dev);
|
||||||
extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
|
extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
|
||||||
extern void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp);
|
extern void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp);
|
||||||
#if __HAVE_OLD_DMA
|
|
||||||
/* GH: This is a dirty hack for now...
|
|
||||||
*/
|
|
||||||
extern void DRM(clear_next_buffer)(drm_device_t *dev);
|
|
||||||
extern int DRM(select_queue)(drm_device_t *dev,
|
|
||||||
void (*wrapper)(unsigned long));
|
|
||||||
extern int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *dma);
|
|
||||||
extern int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma);
|
|
||||||
#endif
|
|
||||||
#if __HAVE_DMA_IRQ
|
#if __HAVE_DMA_IRQ
|
||||||
extern int DRM(irq_install)( drm_device_t *dev, int irq );
|
extern int DRM(irq_install)( drm_device_t *dev, int irq );
|
||||||
extern int DRM(irq_uninstall)( drm_device_t *dev );
|
extern int DRM(irq_uninstall)( drm_device_t *dev );
|
||||||
|
@ -568,6 +555,7 @@ extern int DRM(setunique)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(getmap)(DRM_IOCTL_ARGS);
|
extern int DRM(getmap)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(getclient)(DRM_IOCTL_ARGS);
|
extern int DRM(getclient)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(getstats)(DRM_IOCTL_ARGS);
|
extern int DRM(getstats)(DRM_IOCTL_ARGS);
|
||||||
|
extern int DRM(noop)(DRM_IOCTL_ARGS);
|
||||||
|
|
||||||
/* Context IOCTL support (drm_context.h) */
|
/* Context IOCTL support (drm_context.h) */
|
||||||
extern int DRM(resctx)(DRM_IOCTL_ARGS);
|
extern int DRM(resctx)(DRM_IOCTL_ARGS);
|
||||||
|
@ -588,11 +576,6 @@ extern int DRM(rmdraw)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(getmagic)(DRM_IOCTL_ARGS);
|
extern int DRM(getmagic)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(authmagic)(DRM_IOCTL_ARGS);
|
extern int DRM(authmagic)(DRM_IOCTL_ARGS);
|
||||||
|
|
||||||
/* Locking IOCTL support (drm_lock.h) */
|
|
||||||
extern int DRM(block)(DRM_IOCTL_ARGS);
|
|
||||||
extern int DRM(unblock)(DRM_IOCTL_ARGS);
|
|
||||||
extern int DRM(finish)(DRM_IOCTL_ARGS);
|
|
||||||
|
|
||||||
/* Buffer management support (drm_bufs.h) */
|
/* Buffer management support (drm_bufs.h) */
|
||||||
extern int DRM(addmap)(DRM_IOCTL_ARGS);
|
extern int DRM(addmap)(DRM_IOCTL_ARGS);
|
||||||
extern int DRM(rmmap)(DRM_IOCTL_ARGS);
|
extern int DRM(rmmap)(DRM_IOCTL_ARGS);
|
||||||
|
|
|
@ -32,7 +32,9 @@
|
||||||
|
|
||||||
#include "drmP.h"
|
#include "drmP.h"
|
||||||
|
|
||||||
#if __HAVE_CTX_BITMAP
|
#if !__HAVE_CTX_BITMAP
|
||||||
|
#error "__HAVE_CTX_BITMAP must be defined"
|
||||||
|
#endif
|
||||||
|
|
||||||
/* ================================================================
|
/* ================================================================
|
||||||
* Context bitmap support
|
* Context bitmap support
|
||||||
|
@ -345,347 +347,3 @@ int DRM(rmctx)( DRM_IOCTL_ARGS )
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
#else /* __HAVE_CTX_BITMAP */
|
|
||||||
|
|
||||||
/* ================================================================
|
|
||||||
* Old-style context support
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
int DRM(context_switch)(drm_device_t *dev, int old, int new)
|
|
||||||
{
|
|
||||||
drm_queue_t *q;
|
|
||||||
|
|
||||||
#if 0
|
|
||||||
atomic_inc(&dev->total_ctx);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if (test_and_set_bit(0, &dev->context_flag)) {
|
|
||||||
DRM_ERROR("Reentering -- FIXME\n");
|
|
||||||
return DRM_ERR(EBUSY);
|
|
||||||
}
|
|
||||||
|
|
||||||
DRM_DEBUG("Context switch from %d to %d\n", old, new);
|
|
||||||
|
|
||||||
if (new >= dev->queue_count) {
|
|
||||||
clear_bit(0, &dev->context_flag);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (new == dev->last_context) {
|
|
||||||
clear_bit(0, &dev->context_flag);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
q = dev->queuelist[new];
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) == 1) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
clear_bit(0, &dev->context_flag);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(context_switch_complete)(drm_device_t *dev, int new)
|
|
||||||
{
|
|
||||||
drm_device_dma_t *dma = dev->dma;
|
|
||||||
|
|
||||||
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
|
|
||||||
dev->last_switch = jiffies;
|
|
||||||
|
|
||||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
|
||||||
DRM_ERROR("Lock isn't held after context switch\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
|
|
||||||
if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
|
|
||||||
DRM_KERNEL_CONTEXT)) {
|
|
||||||
DRM_ERROR("Cannot free lock\n");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
clear_bit(0, &dev->context_flag);
|
|
||||||
DRM_WAKEUP_INT(&dev->context_wait);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
|
|
||||||
{
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
if (atomic_read(&q->use_count) != 1
|
|
||||||
|| atomic_read(&q->finalization)
|
|
||||||
|| atomic_read(&q->block_count)) {
|
|
||||||
DRM_ERROR("New queue is already in use: u%ld f%ld b%ld\n",
|
|
||||||
(unsigned long)atomic_read(&q->use_count),
|
|
||||||
(unsigned long)atomic_read(&q->finalization),
|
|
||||||
(unsigned long)atomic_read(&q->block_count));
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic_set(&q->finalization, 0);
|
|
||||||
atomic_set(&q->block_count, 0);
|
|
||||||
atomic_set(&q->block_read, 0);
|
|
||||||
atomic_set(&q->block_write, 0);
|
|
||||||
atomic_set(&q->total_queued, 0);
|
|
||||||
atomic_set(&q->total_flushed, 0);
|
|
||||||
atomic_set(&q->total_locks, 0);
|
|
||||||
|
|
||||||
q->write_queue = 0;
|
|
||||||
q->read_queue = 0;
|
|
||||||
q->flush_queue = 0;
|
|
||||||
|
|
||||||
q->flags = ctx->flags;
|
|
||||||
|
|
||||||
DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/* drm_alloc_queue:
|
|
||||||
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
|
|
||||||
disappear (so all deallocation must be done after IOCTLs are off)
|
|
||||||
2) dev->queue_count < dev->queue_slots
|
|
||||||
3) dev->queuelist[i].use_count == 0 and
|
|
||||||
dev->queuelist[i].finalization == 0 if i not in use
|
|
||||||
POST: 1) dev->queuelist[i].use_count == 1
|
|
||||||
2) dev->queue_count < dev->queue_slots */
|
|
||||||
|
|
||||||
static int DRM(alloc_queue)(drm_device_t *dev)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
drm_queue_t *queue;
|
|
||||||
int oldslots;
|
|
||||||
int newslots;
|
|
||||||
/* Check for a free queue */
|
|
||||||
for (i = 0; i < dev->queue_count; i++) {
|
|
||||||
atomic_inc(&dev->queuelist[i]->use_count);
|
|
||||||
if (atomic_read(&dev->queuelist[i]->use_count) == 1
|
|
||||||
&& !atomic_read(&dev->queuelist[i]->finalization)) {
|
|
||||||
DRM_DEBUG("%d (free)\n", i);
|
|
||||||
return i;
|
|
||||||
}
|
|
||||||
atomic_dec(&dev->queuelist[i]->use_count);
|
|
||||||
}
|
|
||||||
/* Allocate a new queue */
|
|
||||||
DRM_LOCK;
|
|
||||||
|
|
||||||
queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES);
|
|
||||||
memset(queue, 0, sizeof(*queue));
|
|
||||||
atomic_set(&queue->use_count, 1);
|
|
||||||
|
|
||||||
++dev->queue_count;
|
|
||||||
if (dev->queue_count >= dev->queue_slots) {
|
|
||||||
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
|
|
||||||
if (!dev->queue_slots) dev->queue_slots = 1;
|
|
||||||
dev->queue_slots *= 2;
|
|
||||||
newslots = dev->queue_slots * sizeof(*dev->queuelist);
|
|
||||||
|
|
||||||
dev->queuelist = DRM(realloc)(dev->queuelist,
|
|
||||||
oldslots,
|
|
||||||
newslots,
|
|
||||||
DRM_MEM_QUEUES);
|
|
||||||
if (!dev->queuelist) {
|
|
||||||
DRM_UNLOCK;
|
|
||||||
DRM_DEBUG("out of memory\n");
|
|
||||||
return DRM_ERR(ENOMEM);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dev->queuelist[dev->queue_count-1] = queue;
|
|
||||||
|
|
||||||
DRM_UNLOCK;
|
|
||||||
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
|
|
||||||
return dev->queue_count - 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(resctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
drm_ctx_res_t res;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( res, (drm_ctx_res_t *)data, sizeof(res) );
|
|
||||||
|
|
||||||
if (res.count >= DRM_RESERVED_CONTEXTS) {
|
|
||||||
memset(&ctx, 0, sizeof(ctx));
|
|
||||||
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
|
|
||||||
ctx.handle = i;
|
|
||||||
if (DRM_COPY_TO_USER(&res.contexts[i],
|
|
||||||
&i,
|
|
||||||
sizeof(i)))
|
|
||||||
return DRM_ERR(EFAULT);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
res.count = DRM_RESERVED_CONTEXTS;
|
|
||||||
|
|
||||||
DRM_COPY_TO_USER_IOCTL( (drm_ctx_res_t *)data, res, sizeof(res) );
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(addctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
|
|
||||||
/* Init kernel's context and get a new one. */
|
|
||||||
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
|
|
||||||
ctx.handle = DRM(alloc_queue)(dev);
|
|
||||||
}
|
|
||||||
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
|
|
||||||
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(modctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
drm_queue_t *q;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
|
|
||||||
if (ctx.handle < 0 || ctx.handle >= dev->queue_count)
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
q = dev->queuelist[ctx.handle];
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) == 1) {
|
|
||||||
/* No longer in use */
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (DRM_BUFCOUNT(&q->waitlist)) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return DRM_ERR(EBUSY);
|
|
||||||
}
|
|
||||||
|
|
||||||
q->flags = ctx.flags;
|
|
||||||
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(getctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
drm_queue_t *q;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
|
|
||||||
if (ctx.handle >= dev->queue_count)
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
q = dev->queuelist[ctx.handle];
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) == 1) {
|
|
||||||
/* No longer in use */
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.flags = q->flags;
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
|
|
||||||
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(switchctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(newctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
DRM(context_switch_complete)(dev, ctx.handle);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(rmctx)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
drm_ctx_t ctx;
|
|
||||||
drm_queue_t *q;
|
|
||||||
drm_buf_t *buf;
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", ctx.handle);
|
|
||||||
|
|
||||||
if (ctx.handle >= dev->queue_count) return DRM_ERR(EINVAL);
|
|
||||||
q = dev->queuelist[ctx.handle];
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->use_count) == 1) {
|
|
||||||
/* No longer in use */
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic_inc(&q->finalization); /* Mark queue in finalization state */
|
|
||||||
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
|
|
||||||
finalization) */
|
|
||||||
|
|
||||||
while (test_and_set_bit(0, &dev->interrupt_flag)) {
|
|
||||||
static int never;
|
|
||||||
int retcode;
|
|
||||||
retcode = tsleep(&never, PZERO|PCATCH, "never", 1);
|
|
||||||
if (retcode)
|
|
||||||
return retcode;
|
|
||||||
}
|
|
||||||
/* Remove queued buffers */
|
|
||||||
while ((buf = DRM(waitlist_get)(&q->waitlist))) {
|
|
||||||
DRM(free_buffer)(dev, buf);
|
|
||||||
}
|
|
||||||
clear_bit(0, &dev->interrupt_flag);
|
|
||||||
|
|
||||||
/* Wakeup blocked processes */
|
|
||||||
wakeup( &q->block_read );
|
|
||||||
wakeup( &q->block_write );
|
|
||||||
DRM_WAKEUP_INT( &q->flush_queue );
|
|
||||||
/* Finalization over. Queue is made
|
|
||||||
available when both use_count and
|
|
||||||
finalization become 0, which won't
|
|
||||||
happen until all the waiting processes
|
|
||||||
stop waiting. */
|
|
||||||
atomic_dec(&q->finalization);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* __HAVE_CTX_BITMAP */
|
|
||||||
|
|
265
bsd/drm_dma.h
265
bsd/drm_dma.h
|
@ -171,271 +171,6 @@ void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
||||||
/* GH: This is a big hack for now...
|
|
||||||
*/
|
|
||||||
#if __HAVE_OLD_DMA
|
|
||||||
|
|
||||||
void DRM(clear_next_buffer)(drm_device_t *dev)
|
|
||||||
{
|
|
||||||
drm_device_dma_t *dma = dev->dma;
|
|
||||||
|
|
||||||
dma->next_buffer = NULL;
|
|
||||||
if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
|
|
||||||
DRM_WAKEUP_INT(&dma->next_queue->flush_queue);
|
|
||||||
}
|
|
||||||
dma->next_queue = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
int candidate = -1;
|
|
||||||
int j = jiffies;
|
|
||||||
|
|
||||||
if (!dev) {
|
|
||||||
DRM_ERROR("No device\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
|
|
||||||
/* This only happens between the time the
|
|
||||||
interrupt is initialized and the time
|
|
||||||
the queues are initialized. */
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Doing "while locked" DMA? */
|
|
||||||
if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
|
|
||||||
return DRM_KERNEL_CONTEXT;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If there are buffers on the last_context
|
|
||||||
queue, and we have not been executing
|
|
||||||
this context very long, continue to
|
|
||||||
execute this context. */
|
|
||||||
if (dev->last_switch <= j
|
|
||||||
&& dev->last_switch + DRM_TIME_SLICE > j
|
|
||||||
&& DRM_WAITCOUNT(dev, dev->last_context)) {
|
|
||||||
return dev->last_context;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Otherwise, find a candidate */
|
|
||||||
for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
|
|
||||||
if (DRM_WAITCOUNT(dev, i)) {
|
|
||||||
candidate = dev->last_checked = i;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (candidate < 0) {
|
|
||||||
for (i = 0; i < dev->queue_count; i++) {
|
|
||||||
if (DRM_WAITCOUNT(dev, i)) {
|
|
||||||
candidate = dev->last_checked = i;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (wrapper
|
|
||||||
&& candidate >= 0
|
|
||||||
&& candidate != dev->last_context
|
|
||||||
&& dev->last_switch <= j
|
|
||||||
&& dev->last_switch + DRM_TIME_SLICE > j) {
|
|
||||||
int s = splclock();
|
|
||||||
if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
|
|
||||||
callout_reset(&dev->timer,
|
|
||||||
dev->last_switch + DRM_TIME_SLICE - j,
|
|
||||||
(void (*)(void *))wrapper,
|
|
||||||
dev);
|
|
||||||
}
|
|
||||||
splx(s);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return candidate;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
drm_queue_t *q;
|
|
||||||
drm_buf_t *buf;
|
|
||||||
int idx;
|
|
||||||
int while_locked = 0;
|
|
||||||
drm_device_dma_t *dma = dev->dma;
|
|
||||||
int error;
|
|
||||||
|
|
||||||
DRM_DEBUG("%d\n", d->send_count);
|
|
||||||
|
|
||||||
if (d->flags & _DRM_DMA_WHILE_LOCKED) {
|
|
||||||
int context = dev->lock.hw_lock->lock;
|
|
||||||
|
|
||||||
if (!_DRM_LOCK_IS_HELD(context)) {
|
|
||||||
DRM_ERROR("No lock held during \"while locked\""
|
|
||||||
" request\n");
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
if (d->context != _DRM_LOCKING_CONTEXT(context)
|
|
||||||
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
|
|
||||||
DRM_ERROR("Lock held by %d while %d makes"
|
|
||||||
" \"while locked\" request\n",
|
|
||||||
_DRM_LOCKING_CONTEXT(context),
|
|
||||||
d->context);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
q = dev->queuelist[DRM_KERNEL_CONTEXT];
|
|
||||||
while_locked = 1;
|
|
||||||
} else {
|
|
||||||
q = dev->queuelist[d->context];
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
atomic_inc(&q->use_count);
|
|
||||||
if (atomic_read(&q->block_write)) {
|
|
||||||
atomic_inc(&q->block_count);
|
|
||||||
for (;;) {
|
|
||||||
if (!atomic_read(&q->block_write)) break;
|
|
||||||
error = tsleep(&q->block_write, PZERO|PCATCH,
|
|
||||||
"dmawr", 0);
|
|
||||||
if (error) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
return error;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
atomic_dec(&q->block_count);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < d->send_count; i++) {
|
|
||||||
idx = d->send_indices[i];
|
|
||||||
if (idx < 0 || idx >= dma->buf_count) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Index %d (of %d max)\n",
|
|
||||||
d->send_indices[i], dma->buf_count - 1);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
buf = dma->buflist[ idx ];
|
|
||||||
if (buf->pid != DRM_CURRENTPID) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Process %d using buffer owned by %d\n",
|
|
||||||
DRM_CURRENTPID, buf->pid);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
if (buf->list != DRM_LIST_NONE) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Process %d using buffer %d on list %d\n",
|
|
||||||
DRM_CURRENTPID, buf->idx, buf->list);
|
|
||||||
}
|
|
||||||
buf->used = d->send_sizes[i];
|
|
||||||
buf->while_locked = while_locked;
|
|
||||||
buf->context = d->context;
|
|
||||||
if (!buf->used) {
|
|
||||||
DRM_ERROR("Queueing 0 length buffer\n");
|
|
||||||
}
|
|
||||||
if (buf->pending) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Queueing pending buffer:"
|
|
||||||
" buffer %d, offset %d\n",
|
|
||||||
d->send_indices[i], i);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
if (buf->waiting) {
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
DRM_ERROR("Queueing waiting buffer:"
|
|
||||||
" buffer %d, offset %d\n",
|
|
||||||
d->send_indices[i], i);
|
|
||||||
return DRM_ERR(EINVAL);
|
|
||||||
}
|
|
||||||
buf->waiting = 1;
|
|
||||||
if (atomic_read(&q->use_count) == 1
|
|
||||||
|| atomic_read(&q->finalization)) {
|
|
||||||
DRM(free_buffer)(dev, buf);
|
|
||||||
} else {
|
|
||||||
DRM(waitlist_put)(&q->waitlist, buf);
|
|
||||||
atomic_inc(&q->total_queued);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
atomic_dec(&q->use_count);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
|
|
||||||
int order)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
drm_buf_t *buf;
|
|
||||||
drm_device_dma_t *dma = dev->dma;
|
|
||||||
|
|
||||||
for (i = d->granted_count; i < d->request_count; i++) {
|
|
||||||
buf = DRM(freelist_get)(&dma->bufs[order].freelist,
|
|
||||||
d->flags & _DRM_DMA_WAIT);
|
|
||||||
if (!buf) break;
|
|
||||||
if (buf->pending || buf->waiting) {
|
|
||||||
DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
|
|
||||||
buf->idx,
|
|
||||||
buf->pid,
|
|
||||||
buf->waiting,
|
|
||||||
buf->pending);
|
|
||||||
}
|
|
||||||
buf->pid = DRM_CURRENTPID;
|
|
||||||
if (DRM_COPY_TO_USER(&d->request_indices[i],
|
|
||||||
&buf->idx,
|
|
||||||
sizeof(buf->idx)))
|
|
||||||
return DRM_ERR(EFAULT);
|
|
||||||
|
|
||||||
if (DRM_COPY_TO_USER(&d->request_sizes[i],
|
|
||||||
&buf->total,
|
|
||||||
sizeof(buf->total)))
|
|
||||||
return DRM_ERR(EFAULT);
|
|
||||||
|
|
||||||
++d->granted_count;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
|
|
||||||
{
|
|
||||||
int order;
|
|
||||||
int retcode = 0;
|
|
||||||
int tmp_order;
|
|
||||||
|
|
||||||
order = DRM(order)(dma->request_size);
|
|
||||||
|
|
||||||
dma->granted_count = 0;
|
|
||||||
retcode = DRM(dma_get_buffers_of_order)(dev, dma, order);
|
|
||||||
|
|
||||||
if (dma->granted_count < dma->request_count
|
|
||||||
&& (dma->flags & _DRM_DMA_SMALLER_OK)) {
|
|
||||||
for (tmp_order = order - 1;
|
|
||||||
!retcode
|
|
||||||
&& dma->granted_count < dma->request_count
|
|
||||||
&& tmp_order >= DRM_MIN_ORDER;
|
|
||||||
--tmp_order) {
|
|
||||||
|
|
||||||
retcode = DRM(dma_get_buffers_of_order)(dev, dma,
|
|
||||||
tmp_order);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (dma->granted_count < dma->request_count
|
|
||||||
&& (dma->flags & _DRM_DMA_LARGER_OK)) {
|
|
||||||
for (tmp_order = order + 1;
|
|
||||||
!retcode
|
|
||||||
&& dma->granted_count < dma->request_count
|
|
||||||
&& tmp_order <= DRM_MAX_ORDER;
|
|
||||||
++tmp_order) {
|
|
||||||
|
|
||||||
retcode = DRM(dma_get_buffers_of_order)(dev, dma,
|
|
||||||
tmp_order);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* __HAVE_OLD_DMA */
|
|
||||||
|
|
||||||
|
|
||||||
#if __HAVE_DMA_IRQ
|
#if __HAVE_DMA_IRQ
|
||||||
|
|
||||||
int DRM(irq_install)( drm_device_t *dev, int irq )
|
int DRM(irq_install)( drm_device_t *dev, int irq )
|
||||||
|
|
|
@ -68,12 +68,6 @@
|
||||||
#ifndef __HAVE_DMA_SCHEDULE
|
#ifndef __HAVE_DMA_SCHEDULE
|
||||||
#define __HAVE_DMA_SCHEDULE 0
|
#define __HAVE_DMA_SCHEDULE 0
|
||||||
#endif
|
#endif
|
||||||
#ifndef __HAVE_DMA_FLUSH
|
|
||||||
#define __HAVE_DMA_FLUSH 0
|
|
||||||
#endif
|
|
||||||
#ifndef __HAVE_DMA_READY
|
|
||||||
#define __HAVE_DMA_READY 0
|
|
||||||
#endif
|
|
||||||
#ifndef __HAVE_DMA_QUIESCENT
|
#ifndef __HAVE_DMA_QUIESCENT
|
||||||
#define __HAVE_DMA_QUIESCENT 0
|
#define __HAVE_DMA_QUIESCENT 0
|
||||||
#endif
|
#endif
|
||||||
|
@ -150,8 +144,8 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
|
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
|
||||||
|
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
|
||||||
|
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
|
||||||
|
@ -175,7 +169,7 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
|
||||||
|
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
|
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
|
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
|
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
|
||||||
|
|
||||||
#if __HAVE_DMA
|
#if __HAVE_DMA
|
||||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
|
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
|
||||||
|
@ -514,12 +508,6 @@ static int DRM(setup)( drm_device_t *dev )
|
||||||
|
|
||||||
DRM_DEBUG( "\n" );
|
DRM_DEBUG( "\n" );
|
||||||
|
|
||||||
/* The kernel's context could be created here, but is now created
|
|
||||||
* in drm_dma_enqueue. This is more resource-efficient for
|
|
||||||
* hardware that does not do DMA, but may mean that
|
|
||||||
* drm_select_queue fails between the time the interrupt is
|
|
||||||
* initialized and the time the queues are initialized.
|
|
||||||
*/
|
|
||||||
DRIVER_POSTSETUP();
|
DRIVER_POSTSETUP();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -1112,9 +1100,6 @@ int DRM(lock)( DRM_IOCTL_ARGS )
|
||||||
q = dev->queuelist[lock.context];
|
q = dev->queuelist[lock.context];
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if __HAVE_DMA_FLUSH
|
|
||||||
ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
|
|
||||||
#endif
|
|
||||||
if ( !ret ) {
|
if ( !ret ) {
|
||||||
for (;;) {
|
for (;;) {
|
||||||
if ( !dev->lock.hw_lock ) {
|
if ( !dev->lock.hw_lock ) {
|
||||||
|
@ -1140,18 +1125,9 @@ int DRM(lock)( DRM_IOCTL_ARGS )
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if __HAVE_DMA_FLUSH
|
|
||||||
DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if ( !ret ) {
|
if ( !ret ) {
|
||||||
/* FIXME: Add signal blocking here */
|
/* FIXME: Add signal blocking here */
|
||||||
|
|
||||||
#if __HAVE_DMA_READY
|
|
||||||
if ( lock.flags & _DRM_LOCK_READY ) {
|
|
||||||
DRIVER_DMA_READY();
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
#if __HAVE_DMA_QUIESCENT
|
#if __HAVE_DMA_QUIESCENT
|
||||||
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
|
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
|
||||||
DRIVER_DMA_QUIESCENT();
|
DRIVER_DMA_QUIESCENT();
|
||||||
|
|
|
@ -230,3 +230,9 @@ int DRM(getstats)( DRM_IOCTL_ARGS )
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int DRM(noop)(DRM_IOCTL_ARGS)
|
||||||
|
{
|
||||||
|
DRM_DEBUG("\n");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
235
bsd/drm_lists.h
235
bsd/drm_lists.h
|
@ -1,235 +0,0 @@
|
||||||
/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
|
|
||||||
* Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
|
|
||||||
*
|
|
||||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
|
||||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
|
||||||
* All Rights Reserved.
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
|
||||||
* to deal in the Software without restriction, including without limitation
|
|
||||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
||||||
* and/or sell copies of the Software, and to permit persons to whom the
|
|
||||||
* Software is furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice (including the next
|
|
||||||
* paragraph) shall be included in all copies or substantial portions of the
|
|
||||||
* Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
||||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
||||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
||||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
||||||
* OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
*
|
|
||||||
* Authors:
|
|
||||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
|
||||||
* Gareth Hughes <gareth@valinux.com>
|
|
||||||
*
|
|
||||||
* $FreeBSD: src/sys/dev/drm/drm_lists.h,v 1.3 2003/03/09 02:08:28 anholt Exp $
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "drmP.h"
|
|
||||||
|
|
||||||
#if __HAVE_DMA_WAITLIST
|
|
||||||
|
|
||||||
int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
|
|
||||||
{
|
|
||||||
if (bl->count)
|
|
||||||
return DRM_ERR( EINVAL );
|
|
||||||
|
|
||||||
bl->bufs = DRM(alloc)((bl->count + 2) * sizeof(*bl->bufs),
|
|
||||||
DRM_MEM_BUFLISTS);
|
|
||||||
|
|
||||||
if(!bl->bufs) return DRM_ERR(ENOMEM);
|
|
||||||
|
|
||||||
bzero(bl->bufs, sizeof(*bl->bufs));
|
|
||||||
|
|
||||||
bl->count = count;
|
|
||||||
bl->rp = bl->bufs;
|
|
||||||
bl->wp = bl->bufs;
|
|
||||||
bl->end = &bl->bufs[bl->count+1];
|
|
||||||
DRM_SPININIT( bl->write_lock, "writelock" );
|
|
||||||
DRM_SPININIT( bl->read_lock, "readlock" );
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(waitlist_destroy)(drm_waitlist_t *bl)
|
|
||||||
{
|
|
||||||
if (bl->rp != bl->wp)
|
|
||||||
return DRM_ERR( EINVAL );
|
|
||||||
if (bl->bufs) DRM(free)(bl->bufs,
|
|
||||||
(bl->count + 2) * sizeof(*bl->bufs),
|
|
||||||
DRM_MEM_BUFLISTS);
|
|
||||||
bl->count = 0;
|
|
||||||
bl->bufs = NULL;
|
|
||||||
bl->rp = NULL;
|
|
||||||
bl->wp = NULL;
|
|
||||||
bl->end = NULL;
|
|
||||||
DRM_SPINUNINIT( bl->write_lock );
|
|
||||||
DRM_SPINUNINIT( bl->read_lock );
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
|
|
||||||
{
|
|
||||||
int left;
|
|
||||||
int s;
|
|
||||||
left = DRM_LEFTCOUNT(bl);
|
|
||||||
if (!left) {
|
|
||||||
DRM_ERROR("Overflow while adding buffer %d from pid %d\n",
|
|
||||||
buf->idx, buf->pid);
|
|
||||||
return DRM_ERR( EINVAL );
|
|
||||||
}
|
|
||||||
buf->list = DRM_LIST_WAIT;
|
|
||||||
|
|
||||||
DRM_SPINLOCK(&bl->write_lock);
|
|
||||||
s = spldrm();
|
|
||||||
*bl->wp = buf;
|
|
||||||
if (++bl->wp >= bl->end) bl->wp = bl->bufs;
|
|
||||||
splx(s);
|
|
||||||
DRM_SPINUNLOCK(&bl->write_lock);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Remove and return the buffer at the head of the waitlist ring, or
 * NULL if the ring is empty.  Readers are serialized by read_lock;
 * spldrm() additionally blocks the DRM interrupt handler while the
 * read pointer is examined and advanced. */
drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
{
	drm_buf_t *buf;
	int s;

	DRM_SPINLOCK(&bl->read_lock);
	s = spldrm();
	/* Slot is read before the empty check; the value is only used
	 * when rp != wp, i.e. when the ring is non-empty. */
	buf = *bl->rp;
	if (bl->rp == bl->wp) {
		splx(s);
		DRM_SPINUNLOCK(&bl->read_lock);
		return NULL;
	}
	/* Advance the read pointer, wrapping at the end of the ring. */
	if (++bl->rp >= bl->end) bl->rp = bl->bufs;
	splx(s);
	DRM_SPINUNLOCK(&bl->read_lock);

	return buf;
}
|
|
||||||
|
|
||||||
#endif /* __HAVE_DMA_WAITLIST */
|
|
||||||
|
|
||||||
|
|
||||||
#if __HAVE_DMA_FREELIST
|
|
||||||
|
|
||||||
/* Set up an empty freelist.  The count argument is accepted for
 * interface compatibility but is not used here: the list starts empty
 * and buffers are added later via DRM(freelist_put).  Always returns
 * 0. */
int DRM(freelist_create)(drm_freelist_t *bl, int count)
{
	bl->next      = NULL;
	bl->waiting   = 0;
	bl->low_mark  = 0;
	bl->high_mark = 0;

	atomic_set(&bl->count, 0);
	atomic_set(&bl->wfh, 0);

	DRM_SPININIT( bl->lock, "freelistlock" );
	++bl->initialized;
	return 0;
}
|
|
||||||
|
|
||||||
/* Tear down a freelist.  The buffers on it belong to the DMA buffer
 * tables and are not freed here; only the list bookkeeping is reset.
 * Always returns 0. */
int DRM(freelist_destroy)(drm_freelist_t *bl)
{
	bl->next = NULL;
	atomic_set(&bl->count, 0);
	DRM_SPINUNINIT( bl->lock );
	return 0;
}
|
|
||||||
|
|
||||||
/* Return a buffer to the freelist.
 *
 * Returns 0 on success, 1 on failure (no DMA support, no freelist, or
 * a bookkeeping inconsistency).  Sanity-checks that the buffer is not
 * still in use and that the free count never exceeds the total number
 * of DMA buffers.  If a consumer is blocked waiting for buffers (wfh
 * set) and the free count has reached the high-water mark, the waiters
 * are woken.
 */
int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
{
	drm_device_dma_t *dma = dev->dma;

	if (!dma) {
		DRM_ERROR("No DMA support\n");
		return 1;
	}

	/* A buffer being freed should not be waiting, pending, or
	 * already on the free list -- log the anomaly but keep going. */
	if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
		DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
			  buf->idx, buf->waiting, buf->pending, buf->list);
	}
	if (!bl) return 1;
	buf->list = DRM_LIST_FREE;

	/* Push onto the head of the singly linked free list. */
	DRM_SPINLOCK( &bl->lock );
	buf->next = bl->next;
	bl->next = buf;
	DRM_SPINUNLOCK( &bl->lock );

	atomic_inc(&bl->count);
	if (atomic_read(&bl->count) > dma->buf_count) {
		/* NOTE(review): the value is cast to unsigned long but
		 * printed with %ld; harmless for realistic counts, but
		 * %lu would match the cast -- confirm. */
		DRM_ERROR("%ld of %d buffers free after addition of %d\n",
			  (unsigned long)atomic_read(&bl->count),
			  dma->buf_count, buf->idx);
		return 1;
	}
	/* Check for high water mark */
	if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
		atomic_set(&bl->wfh, 0);
		DRM_WAKEUP_INT((void *)&bl->waiting);
	}
	return 0;
}
|
|
||||||
|
|
||||||
/* Pop one buffer off the freelist, or return NULL if it is empty.
 * Never blocks and does not consult the low/high water marks. */
static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
{
	drm_buf_t *entry;

	if (bl == NULL)
		return NULL;

	/* Unlink the head of the list under the freelist lock. */
	DRM_SPINLOCK(&bl->lock);
	entry = bl->next;
	if (entry == NULL) {
		DRM_SPINUNLOCK(&bl->lock);
		return NULL;
	}
	bl->next = entry->next;
	DRM_SPINUNLOCK(&bl->lock);

	atomic_dec(&bl->count);
	entry->next = NULL;
	entry->list = DRM_LIST_NONE;

	/* A buffer coming off the free list should be idle. */
	if (entry->waiting || entry->pending) {
		DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
			  entry->idx, entry->waiting, entry->pending,
			  entry->list);
	}

	return entry;
}
|
|
||||||
|
|
||||||
/* Take a buffer from the freelist.
 *
 * If the free count has fallen to the low-water mark, the
 * waiting-for-high (wfh) flag is set; while it is set a blocking
 * caller sleeps until DRM(freelist_put) refills the list to the
 * high-water mark and wakes us, then retries the non-blocking grab.
 * A non-blocking caller (block == 0) simply tries once.
 *
 * Returns a buffer, or NULL if none is available (or the sleep was
 * interrupted by a signal).
 */
drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
{
	drm_buf_t *buf = NULL;
	int error;

	if (!bl || !bl->initialized) return NULL;

	/* Check for low water mark */
	if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
		atomic_set(&bl->wfh, 1);
	if (atomic_read(&bl->wfh)) {
		if (block) {
			for (;;) {
				/* Was DRM(freelist_try(bl)) -- the whole
				 * call wrapped in the name-mangling macro.
				 * That happens to expand to the same code,
				 * but mangle only the function name, as the
				 * non-blocking path below already does. */
				if (!atomic_read(&bl->wfh)
				    && (buf = DRM(freelist_try)(bl))) break;
				error = tsleep((void *)&bl->waiting,
					       PZERO|PCATCH, "drmfg", 0);
				if (error)
					break;
			}
		}
		return buf;
	}

	return DRM(freelist_try)(bl);
}
|
|
||||||
|
|
||||||
#endif /* __HAVE_DMA_FREELIST */
|
|
107
bsd/drm_lock.h
107
bsd/drm_lock.h
|
@ -33,18 +33,6 @@
|
||||||
|
|
||||||
#include "drmP.h"
|
#include "drmP.h"
|
||||||
|
|
||||||
/* DRM_IOCTL_BLOCK handler.  Historically blocked a context's DMA
 * queue; now a stub that only emits a debug trace and reports
 * success. */
int DRM(block)( DRM_IOCTL_ARGS )
{
	DRM_DEBUG("\n");

	return 0;
}
|
|
||||||
|
|
||||||
/* DRM_IOCTL_UNBLOCK handler.  Counterpart of DRM(block); likewise a
 * stub that only emits a debug trace and reports success. */
int DRM(unblock)( DRM_IOCTL_ARGS )
{
	DRM_DEBUG("\n");

	return 0;
}
|
|
||||||
|
|
||||||
int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context)
|
int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context)
|
||||||
{
|
{
|
||||||
unsigned int old, new;
|
unsigned int old, new;
|
||||||
|
@ -107,98 +95,3 @@ int DRM(lock_free)(drm_device_t *dev,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Wait for a queue's pending DMA to drain.
 *
 * If the queue is in use by others (use_count > 1 after taking our own
 * reference), raise block_write so no new buffers can be enqueued,
 * then sleep on flush_queue until woken.  Returns 0 on success or the
 * tsleep error (e.g. EINTR/ERESTART) if the sleep was interrupted.
 *
 * NOTE(review): on a tsleep error this returns without dropping
 * use_count or block_count -- looks like a reference leak on the
 * signal path; confirm against the callers' teardown behavior.
 */
static int DRM(flush_queue)(drm_device_t *dev, int context)
{
	int error;
	int ret = 0;
	drm_queue_t *q = dev->queuelist[context];

	DRM_DEBUG("\n");

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) > 1) {
		/* Block further writes, then wait for the queue to drain. */
		atomic_inc(&q->block_write);
		atomic_inc(&q->block_count);
		error = tsleep((void *)&q->flush_queue, PZERO|PCATCH, "drmfq", 0);
		if (error)
			return error;
		atomic_dec(&q->block_count);
	}
	atomic_dec(&q->use_count);

	/* NOTE: block_write is still incremented!
	   Use drm_flush_unlock_queue to decrement. */
	return ret;
}
|
|
||||||
|
|
||||||
/* Release the write block raised on a queue by DRM(flush_queue) and
 * wake any writers sleeping on the queue.  Always returns 0. */
static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
{
	drm_queue_t *q = dev->queuelist[context];

	DRM_DEBUG("\n");

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) > 1) {
		/* Only drop block_write if a flush actually raised it. */
		if (atomic_read(&q->block_write)) {
			atomic_dec(&q->block_write);
			DRM_WAKEUP_INT((void *)&q->write_queue);
		}
	}
	atomic_dec(&q->use_count);
	return 0;
}
|
|
||||||
|
|
||||||
int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
|
|
||||||
drm_lock_flags_t flags)
|
|
||||||
{
|
|
||||||
int ret = 0;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
if (flags & _DRM_LOCK_FLUSH) {
|
|
||||||
ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
|
|
||||||
if (!ret) ret = DRM(flush_queue)(dev, context);
|
|
||||||
}
|
|
||||||
if (flags & _DRM_LOCK_FLUSH_ALL) {
|
|
||||||
for (i = 0; !ret && i < dev->queue_count; i++) {
|
|
||||||
ret = DRM(flush_queue)(dev, i);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Counterpart of DRM(flush_block_and_flush): drop the write blocks on
 * the same set of queues selected by the lock flags.  A failure stops
 * the _DRM_LOCK_FLUSH_ALL loop and is returned, though
 * flush_unblock_queue itself currently always succeeds. */
int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
{
	int ret = 0;
	int i;

	DRM_DEBUG("\n");

	if (flags & _DRM_LOCK_FLUSH) {
		ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
		if (ret == 0)
			ret = DRM(flush_unblock_queue)(dev, context);
	}

	if (flags & _DRM_LOCK_FLUSH_ALL) {
		for (i = 0; ret == 0 && i < dev->queue_count; i++)
			ret = DRM(flush_unblock_queue)(dev, i);
	}

	return ret;
}
|
|
||||||
|
|
||||||
int DRM(finish)( DRM_IOCTL_ARGS )
|
|
||||||
{
|
|
||||||
DRM_DEVICE;
|
|
||||||
int ret = 0;
|
|
||||||
drm_lock_t lock;
|
|
||||||
|
|
||||||
DRM_DEBUG("\n");
|
|
||||||
|
|
||||||
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
|
|
||||||
|
|
||||||
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
|
|
||||||
DRM(flush_unblock)(dev, lock.context, lock.flags);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
Loading…
Reference in New Issue