Lots of DRM fixes: added new pieces of template code so the ffb driver can
be ported, rolled back the r128 and i810 version bumps so XFree86 4.1.0 still
works with CVS kernel modules, added Config.in and updated Makefile.kernel,
incorporated many DRM fixes inspired by patches sent by Red Hat, made
DRM(realloc) usage check for NULL allocations, and restructured the driver
init routines to export dev_priv only when fully initialized and to check
for all error conditions.
main
Jeff Hartmann 2001-08-07 18:15:10 +00:00
parent 56bd9c2077
commit 51e38d96ea
34 changed files with 1016 additions and 860 deletions
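The DRM(realloc) hardening in this commit follows the classic safe-realloc pattern: never assign the result straight into the only pointer you hold, or a failed allocation leaks the old buffer and leaves nothing for the error path to clean up. A minimal user-space sketch of the same idea, with plain realloc() standing in for the kernel-internal DRM(realloc) and a made-up helper name:

#include <stdlib.h>
#include <string.h>

/* Grow an array of buffer pointers the way the temp_buflist code below does:
 * keep the old pointer until the new allocation is known to have succeeded,
 * so the caller can still free everything on the error path. */
static int grow_buflist(void ***buflist, size_t old_count, size_t new_count)
{
	void **tmp = realloc(*buflist, new_count * sizeof(**buflist));
	if (!tmp)
		return -1;	/* *buflist is still valid and owned by the caller */

	/* Zero only the newly added slots before publishing the new pointer. */
	memset(tmp + old_count, 0, (new_count - old_count) * sizeof(*tmp));
	*buflist = tmp;
	return 0;
}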

linux-core/Config.in Normal file
View File

@ -0,0 +1,16 @@
#
# drm device configuration
#
# This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
#
bool 'Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)' CONFIG_DRM
if [ "$CONFIG_DRM" != "n" ]; then
tristate ' 3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX
tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA
tristate ' ATI Rage 128' CONFIG_DRM_R128
dep_tristate ' ATI Radeon' CONFIG_DRM_RADEON $CONFIG_AGP
dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP
dep_tristate ' Matrox g200/g400' CONFIG_DRM_MGA $CONFIG_AGP
fi

View File

@ -1,91 +1,30 @@
#
# Makefile for the drm device driver. This driver provides support for
# the Direct Rendering Infrastructure (DRI) in XFree86 4.x.
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
# drm.o is a fake target -- it is never built
# The real targets are in the module-list
O_TARGET := drm.o
export-objs := gamma_drv.o tdfx_drv.o r128_drv.o mga_drv.o i810_drv.o \
ffb_drv.o
list-multi := gamma.o tdfx.o r128.o mga.o i810.o ffb.o
module-list := gamma.o tdfx.o r128.o radeon.o ffb.o mga.o i810.o
export-objs := $(patsubst %.o,%_drv.o,$(module-list))
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o
i810-objs := i810_drv.o i810_dma.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o
# libs-objs are included in every module so that radical changes to the
# architecture of the DRM support library can be made at a later time.
#
# The downside is that each module is larger, and a system that uses
# more than one module (i.e., a dual-head system) will use more memory
# (but a system that uses exactly one module will use the same amount of
# memory).
#
# The upside is that if the DRM support library ever becomes insufficient
# for new families of cards, a new library can be implemented for those new
# cards without impacting the drivers for the old cards. This is significant,
# because testing architectural changes to old cards may be impossible, and
# may delay the implementation of a better architecture. We've traded slight
# memory waste (in the dual-head case) for greatly improved long-term
# maintainability.
#
# NOTE: lib-objs will be eliminated in future versions, thereby
# eliminating the need to compile the .o files into every module, but
# for now we still need them.
#
lib-objs := init.o memory.o proc.o auth.o context.o drawable.o bufs.o
lib-objs += lists.o lock.o ioctl.o fops.o vm.o dma.o ctxbitmap.o
ifeq ($(CONFIG_AGP),y)
lib-objs += agpsupport.o
else
ifeq ($(CONFIG_AGP),m)
lib-objs += agpsupport.o
endif
endif
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o tdfx_context.o
r128-objs := r128_drv.o r128_cce.o r128_context.o r128_bufs.o \
r128_state.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_context.o radeon_bufs.o \
radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o
mga-objs := mga_drv.o mga_dma.o mga_context.o mga_bufs.o \
mga_state.o
i810-objs := i810_drv.o i810_dma.o i810_context.o i810_bufs.o
obj-$(CONFIG_DRM_GAMMA) += gamma.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON) += radeon.o
obj-$(CONFIG_DRM_FFB) += ffb.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
# When linking into the kernel, link the library just once.
# If making modules, we include the library into each module
lib-objs-mod := $(patsubst %.o,%-mod.o,$(lib-objs))
ifdef MAKING_MODULES
lib = drmlib-mod.a
else
obj-y += drmlib.a
endif
obj-$(CONFIG_DRM_GAMMA) += gamma.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
obj-$(CONFIG_DRM_FFB) += ffb.o
include $(TOPDIR)/Rules.make
$(patsubst %.o,%.c,$(lib-objs-mod)):
@ln -sf $(subst -mod,,$@) $@
drmlib-mod.a: $(lib-objs-mod)
rm -f $@
$(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs-mod)
drmlib.a: $(lib-objs)
rm -f $@
$(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs)
gamma.o: $(gamma-objs) $(lib)
$(LD) -r -o $@ $(gamma-objs) $(lib)

View File

@ -266,6 +266,46 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
#if __HAVE_DMA
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
int i;
if (entry->seg_count) {
for (i = 0; i < entry->seg_count; i++) {
DRM(free_pages)(entry->seglist[i],
entry->page_order,
DRM_MEM_DMA);
}
DRM(free)(entry->seglist,
entry->seg_count *
sizeof(*entry->seglist),
DRM_MEM_SEGS);
entry->seg_count = 0;
}
if(entry->buf_count) {
for(i = 0; i < entry->buf_count; i++) {
if(entry->buflist[i].dev_private) {
DRM(free)(entry->buflist[i].dev_private,
entry->buflist[i].dev_priv_size,
DRM_MEM_BUFS);
}
}
DRM(free)(entry->buflist,
entry->buf_count *
sizeof(*entry->buflist),
DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
DRM(freelist_destroy)(&entry->freelist);
#endif
entry->buf_count = 0;
}
}
#if __REALLY_HAVE_AGP
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
@ -286,6 +326,7 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
int total;
int byte_count;
int i;
drm_buf_t **temp_buflist;
if ( !dma ) return -EINVAL;
@ -371,6 +412,11 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
DRM_MEM_BUFS );
if(!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
DRM(cleanup_buf_error)(entry);
}
memset( buf->dev_private, 0, buf->dev_priv_size );
#if __HAVE_DMA_HISTOGRAM
@ -389,11 +435,20 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
DRM_DEBUG( "byte_count: %d\n", byte_count );
dma->buflist = DRM(realloc)( dma->buflist,
temp_buflist = DRM(realloc)( dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
if(!temp_buflist) {
/* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->buflist = temp_buflist;
for ( i = 0 ; i < entry->buf_count ; i++ ) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
@ -446,6 +501,8 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
int i;
int byte_count;
int page_count;
unsigned long *temp_pagelist;
drm_buf_t **temp_buflist;
if ( !dma ) return -EINVAL;
@ -512,11 +569,24 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
}
memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
dma->pagelist = DRM(realloc)( dma->pagelist,
temp_pagelist = DRM(realloc)( dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES );
if(!temp_pagelist) {
DRM(free)( entry->buflist,
count * sizeof(*entry->buflist),
DRM_MEM_BUFS );
DRM(free)( entry->seglist,
count * sizeof(*entry->seglist),
DRM_MEM_SEGS );
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->pagelist = temp_pagelist;
DRM_DEBUG( "pagelist: %d entries\n",
dma->page_count + (count << page_order) );
@ -563,11 +633,20 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = DRM(realloc)( dma->buflist,
temp_buflist = DRM(realloc)( dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
if(!temp_buflist) {
/* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->buflist = temp_buflist;
for ( i = 0 ; i < entry->buf_count ; i++ ) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
@ -601,67 +680,68 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
drm_buf_t **temp_buflist;
if ( !dma ) return -EINVAL;
if ( !dma ) return -EINVAL;
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
count = request.count;
order = DRM(order)( request.size );
size = 1 << order;
count = request.count;
order = DRM(order)( request.size );
size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
alignment = (request.flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = request.agp_start;
byte_count = 0;
agp_offset = request.agp_start;
DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order );
DRM_DEBUG( "size: %d\n", size );
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
DRM_DEBUG( "alignment: %d\n", alignment );
DRM_DEBUG( "page_order: %d\n", page_order );
DRM_DEBUG( "total: %d\n", total );
DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order );
DRM_DEBUG( "size: %d\n", size );
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
DRM_DEBUG( "alignment: %d\n", alignment );
DRM_DEBUG( "page_order: %d\n", page_order );
DRM_DEBUG( "total: %d\n", total );
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
spin_lock( &dev->count_lock );
if ( dev->buf_use ) {
spin_unlock( &dev->count_lock );
return -EBUSY;
}
atomic_inc( &dev->buf_alloc );
spin_unlock( &dev->count_lock );
spin_lock( &dev->count_lock );
if ( dev->buf_use ) {
spin_unlock( &dev->count_lock );
return -EBUSY;
}
atomic_inc( &dev->buf_alloc );
spin_unlock( &dev->count_lock );
down( &dev->struct_sem );
entry = &dma->bufs[order];
if ( entry->buf_count ) {
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM; /* May only call once for each order */
}
down( &dev->struct_sem );
entry = &dma->bufs[order];
if ( entry->buf_count ) {
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
up( &dev->struct_sem );
@ -669,90 +749,108 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
return -EINVAL;
}
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
DRM_MEM_BUFS );
if ( !entry->buflist ) {
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
DRM_MEM_BUFS );
if ( !entry->buflist ) {
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
entry->buf_size = size;
entry->page_order = page_order;
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
offset = 0;
while ( entry->buf_count < count ) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
while ( entry->buf_count < count ) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->sg->handle);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head( &buf->dma_wait );
buf->pid = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->sg->handle);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head( &buf->dma_wait );
buf->pid = 0;
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
DRM_MEM_BUFS );
memset( buf->dev_private, 0, buf->dev_priv_size );
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
DRM_MEM_BUFS );
if(!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
#if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
memset( buf->dev_private, 0, buf->dev_priv_size );
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
# if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
# endif
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
DRM_DEBUG( "byte_count: %d\n", byte_count );
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = DRM(realloc)( dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
DRM_DEBUG( "byte_count: %d\n", byte_count );
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
temp_buflist = DRM(realloc)( dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
if(!temp_buflist) {
/* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->buflist = temp_buflist;
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
#if __HAVE_DMA_FREELIST
DRM(freelist_create)( &entry->freelist, entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
}
DRM(freelist_create)( &entry->freelist, entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
}
#endif
up( &dev->struct_sem );
up( &dev->struct_sem );
request.count = entry->buf_count;
request.size = size;
request.count = entry->buf_count;
request.size = size;
if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_SG;
dma->flags = _DRM_DMA_USE_SG;
atomic_dec( &dev->buf_alloc );
return 0;
atomic_dec( &dev->buf_alloc );
return 0;
}
#endif /* __HAVE_SG */

View File

@ -70,13 +70,20 @@ int DRM(ctxbitmap_next)( drm_device_t *dev )
if((bit+1) > dev->max_context) {
dev->max_context = (bit+1);
if(dev->context_sareas) {
dev->context_sareas = DRM(realloc)(
dev->context_sareas,
(dev->max_context - 1) *
sizeof(*dev->context_sareas),
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
drm_map_t **ctx_sareas;
ctx_sareas = DRM(realloc)(dev->context_sareas,
(dev->max_context - 1) *
sizeof(*dev->context_sareas),
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if(!ctx_sareas) {
clear_bit(bit, dev->ctx_bitmap);
up(&dev->struct_sem);
return -1;
}
dev->context_sareas = ctx_sareas;
dev->context_sareas[bit] = NULL;
} else {
/* max_context == 1 at this point */
@ -84,6 +91,11 @@ int DRM(ctxbitmap_next)( drm_device_t *dev )
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if(!dev->context_sareas) {
clear_bit(bit, dev->ctx_bitmap);
up(&dev->struct_sem);
return -1;
}
dev->context_sareas[bit] = NULL;
}
}

View File

@ -84,6 +84,9 @@
#ifndef __HAVE_SG
#define __HAVE_SG 0
#endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH 0
#endif
#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT()
@ -97,9 +100,47 @@
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
#ifndef DRIVER_FOPS
#if LINUX_VERSION_CODE >= 0x020400
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
owner: THIS_MODULE, \
open: DRM(open), \
flush: DRM(flush), \
release: DRM(release), \
ioctl: DRM(ioctl), \
mmap: DRM(mmap), \
read: DRM(read), \
fasync: DRM(fasync), \
poll: DRM(poll), \
}
#else
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
open: DRM(open), \
flush: DRM(flush), \
release: DRM(release), \
ioctl: DRM(ioctl), \
mmap: DRM(mmap), \
read: DRM(read), \
fasync: DRM(fasync), \
poll: DRM(poll), \
}
#endif
#endif
/*
* The default number of instances (minor numbers) to initialize.
@ -112,21 +153,7 @@ static drm_device_t *DRM(device);
static int *DRM(minor);
static int DRM(numdevs) = 0;
static struct file_operations DRM(fops) = {
#if LINUX_VERSION_CODE >= 0x020400
/* This started being used during 2.4.0-test */
owner: THIS_MODULE,
#endif
open: DRM(open),
flush: DRM(flush),
release: DRM(release),
ioctl: DRM(ioctl),
mmap: DRM(mmap),
read: DRM(read),
fasync: DRM(fasync),
poll: DRM(poll),
};
DRIVER_FOPS;
static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
@ -212,6 +239,7 @@ static int DRM(setup)( drm_device_t *dev )
{
int i;
DRIVER_PRESETUP();
atomic_set( &dev->ioctl_count, 0 );
atomic_set( &dev->vma_count, 0 );
dev->buf_use = 0;
@ -311,6 +339,7 @@ static int DRM(setup)( drm_device_t *dev )
* drm_select_queue fails between the time the interrupt is
* initialized and the time the queues are initialized.
*/
DRIVER_POSTSETUP();
return 0;
}
@ -320,7 +349,7 @@ static int DRM(takedown)( drm_device_t *dev )
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
struct list_head *list, *list_next;
drm_vma_entry_t *vma, *vma_next;
int i;
@ -388,7 +417,10 @@ static int DRM(takedown)( drm_device_t *dev )
}
if( dev->maplist ) {
list_for_each(list, &dev->maplist->head) {
for(list = dev->maplist->head.next;
list != &dev->maplist->head;
list = list_next) {
list_next = list->next;
r_list = (drm_map_list_t *)list;
map = r_list->map;
DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
@ -635,6 +667,7 @@ static void __exit drm_cleanup( void )
}
#endif
}
DRIVER_POSTCLEANUP();
kfree(DRM(minor));
kfree(DRM(device));
DRM(numdevs) = 0;
@ -978,6 +1011,12 @@ int DRM(lock)( struct inode *inode, struct file *filp,
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
DRIVER_DMA_QUIESCENT();
}
#endif
#if __HAVE_KERNEL_CTX_SWITCH
if ( dev->last_context != lock.context ) {
DRM(context_switch)(dev, dev->last_context,
lock.context);
}
#endif
}
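The block of #ifndef/#define defaults added above turns drivers built on this template into pure template consumers: a driver defines only the hooks it needs before including the generic code, and every hook it leaves out collapses to an empty default. A hypothetical driver source sketching that usage (the hook bodies and the "drm_drv.h" include name are assumptions here, not taken from this commit):

/* Enable the new kernel-side context switch path in DRM(lock). */
#define __HAVE_KERNEL_CTX_SWITCH 1

/* Optional lifecycle hooks; anything not defined falls back to a no-op. */
#define DRIVER_PRESETUP()    do { /* runs first in DRM(setup)         */ } while (0)
#define DRIVER_POSTSETUP()   do { /* runs last in DRM(setup)          */ } while (0)
#define DRIVER_POSTCLEANUP() do { /* runs at the end of drm_cleanup() */ } while (0)

/* DRIVER_FOPS and DRIVER_IOCTLS are left undefined, so the template emits
 * its default file_operations structure and ioctl table. */

#include "drm_drv.h"	/* assumed name of the generic template shown above */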

View File

@ -47,6 +47,8 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev)
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
priv = DRM(alloc)(sizeof(*priv), DRM_MEM_FILES);
if(!priv) return -ENOMEM;
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->uid = current->euid;

View File

@ -76,23 +76,26 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_unique_t u;
if (dev->unique_len || dev->unique)
return -EBUSY;
if (dev->unique_len || dev->unique) return -EBUSY;
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
return -EFAULT;
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u))) return -EFAULT;
if (!u.unique_len || u.unique_len > 1024)
return -EINVAL;
if (!u.unique_len || u.unique_len > 1024) return -EINVAL;
dev->unique_len = u.unique_len;
dev->unique = DRM(alloc)(u.unique_len + 1, DRM_MEM_DRIVER);
if(!dev->unique) return -ENOMEM;
if (copy_from_user(dev->unique, u.unique, dev->unique_len))
return -EFAULT;
dev->unique[dev->unique_len] = '\0';
dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2,
DRM_MEM_DRIVER);
if(!dev->devname) {
DRM(free)(dev->devname, sizeof(*dev->devname), DRM_MEM_DRIVER);
return -ENOMEM;
}
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
#ifdef __alpha__

View File

@ -93,6 +93,7 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
DRM(free)( entry, sizeof(*entry), DRM_MEM_SGLISTS );
return -ENOMEM;
}
memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
entry->virtual = vmalloc_32( pages << PAGE_SHIFT );
if ( !entry->virtual ) {

View File

@ -84,6 +84,7 @@ static int DRM(stub_getminor)(const char *name, struct file_operations *fops,
if (!DRM(stub_list)) {
DRM(stub_list) = DRM(alloc)(sizeof(*DRM(stub_list))
* DRM_STUB_MAXCARDS, DRM_MEM_STUB);
if(!DRM(stub_list)) return -1;
for (i = 0; i < DRM_STUB_MAXCARDS; i++) {
DRM(stub_list)[i].name = NULL;
DRM(stub_list)[i].fops = NULL;

View File

@ -126,7 +126,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_vma_entry_t *pt, *prev;
drm_vma_entry_t *pt, *prev, *next;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
@ -146,7 +146,8 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
#endif
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
#else
@ -159,6 +160,8 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
dev->vmalist = pt->next;
}
DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
} else {
prev = pt;
}
}
/* We were the only map that was found */
@ -365,6 +368,19 @@ int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
return 0;
}
#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS() (map->offset)
#endif
#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS() (dev->hose->dense_mem_base - \
dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS() 0
#endif
#endif
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
@ -389,10 +405,13 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
for performance, even if the list was a
bit longer. */
list_for_each(list, &dev->maplist->head) {
unsigned long off;
r_list = (drm_map_list_t *)list;
map = r_list->map;
if (!map) continue;
if (map->offset == VM_OFFSET(vma)) break;
off = DRIVER_GET_MAP_OFS();
if (off == VM_OFFSET(vma)) break;
}
if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
@ -433,10 +452,7 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
#endif
vma->vm_flags |= VM_IO; /* not in core dump */
}
#ifdef __alpha__
offset = dev->hose->dense_mem_base -
dev->hose->mem_space->start;
#endif
offset = DRIVER_GET_REG_OFS();
if (remap_page_range(vma->vm_start,
VM_OFFSET(vma) + offset,
vma->vm_end - vma->vm_start,
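The two DRIVER_GET_*_OFS hooks introduced above let a port like ffb change how mmap offsets are interpreted without touching the shared code. A hypothetical driver header overriding them before the vm template is included (the macro bodies and the "drm_vm.h" include name are illustrative assumptions, not from this commit):

/* DRIVER_GET_MAP_OFS() is expanded inside the map-lookup loop, where the
 * local `map` is in scope; DRIVER_GET_REG_OFS() replaces the old hard-coded
 * __alpha__ bus-address fixup in DRM(mmap). */
#define DRIVER_GET_MAP_OFS()	(map->offset & PAGE_MASK)	/* hypothetical */
#define DRIVER_GET_REG_OFS()	(0)				/* no bus fixup needed */

#include "drm_vm.h"	/* assumed name of the vm template shown above */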

View File

@ -379,10 +379,9 @@ static void i810_kernel_lost_context(drm_device_t *dev)
if (ring->space < 0) ring->space += ring->Size;
}
static int i810_freelist_init(drm_device_t *dev)
static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
{
drm_device_dma_t *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
int my_idx = 24;
u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
int i;
@ -414,7 +413,6 @@ static int i810_dma_initialize(drm_device_t *dev,
{
struct list_head *list;
dev->dev_private = (void *) dev_priv;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
list_for_each(list, &dev->maplist->head) {
@ -426,9 +424,26 @@ static int i810_dma_initialize(drm_device_t *dev,
break;
}
}
if(!dev_priv->sarea_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find sarea!\n");
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
if(!dev_priv->mmio_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
if(!dev_priv->buffer_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find dma buffer map!\n");
return -EINVAL;
}
dev_priv->sarea_priv = (drm_i810_sarea_t *)
((u8 *)dev_priv->sarea_map->handle +
@ -445,15 +460,16 @@ static int i810_dma_initialize(drm_device_t *dev,
init->ring_start,
init->ring_size);
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
if (dev_priv->ring.virtual_start == NULL) {
dev->dev_private = (void *) dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->w = init->w;
dev_priv->h = init->h;
dev_priv->pitch = init->pitch;
@ -464,27 +480,30 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->back_di1 = init->back_offset | init->pitch_bits;
dev_priv->zi1 = init->depth_offset | init->pitch_bits;
/* Program Hardware Status Page */
dev_priv->hw_status_page = i810_alloc_page(dev);
memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
if(dev_priv->hw_status_page == 0UL) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);
I810_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page));
DRM_DEBUG("Enabled hardware status page\n");
/* Now we need to init our freelist */
if(i810_freelist_init(dev) != 0) {
if(i810_freelist_init(dev, dev_priv) != 0) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Not enough space in the status page for"
" the freelist\n");
return -ENOMEM;
}
dev->dev_private = (void *)dev_priv;
return 0;
}
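The i810 init path above is the model for the "export dev_priv only when initialized" rework mentioned in the commit message: dev->dev_private stays NULL while the maps, ring and status page are being located, is assigned transiently on each error path just so i810_dma_cleanup() can find the half-built state, and is published for good only once every step has succeeded. A minimal sketch of that ordering with generic names (the example_* identifiers and struct are hypothetical; drm_device_t comes from the DRM core headers):

/* Hypothetical types and helpers, declared only to keep the sketch self-contained. */
typedef struct example_private { int dummy; } example_private_t;
static int  example_find_maps(drm_device_t *dev, example_private_t *dev_priv);
static void example_dma_cleanup(drm_device_t *dev);

static int example_dma_initialize(drm_device_t *dev, example_private_t *dev_priv)
{
	if (!example_find_maps(dev, dev_priv)) {	/* hypothetical lookup step */
		dev->dev_private = (void *)dev_priv;	/* visible to cleanup only */
		example_dma_cleanup(dev);		/* frees dev_priv, clears the pointer */
		return -EINVAL;
	}

	/* ... further setup; every failure is handled the same way ... */

	dev->dev_private = (void *)dev_priv;		/* publish the fully built state */
	return 0;
}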

View File

@ -41,8 +41,8 @@
#define DRIVER_DESC "Intel i810"
#define DRIVER_DATE "20010616"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \

View File

@ -41,9 +41,9 @@
#define DRIVER_DESC "ATI Rage 128"
#define DRIVER_DATE "20010405"
#define DRIVER_MAJOR 3
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 6
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \

linux/Config.in Normal file
View File

@ -0,0 +1,16 @@
#
# drm device configuration
#
# This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
#
bool 'Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)' CONFIG_DRM
if [ "$CONFIG_DRM" != "n" ]; then
tristate ' 3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX
tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA
tristate ' ATI Rage 128' CONFIG_DRM_R128
dep_tristate ' ATI Radeon' CONFIG_DRM_RADEON $CONFIG_AGP
dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP
dep_tristate ' Matrox g200/g400' CONFIG_DRM_MGA $CONFIG_AGP
fi

View File

@ -1,91 +1,30 @@
#
# Makefile for the drm device driver. This driver provides support for
# the Direct Rendering Infrastructure (DRI) in XFree86 4.x.
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
# drm.o is a fake target -- it is never built
# The real targets are in the module-list
O_TARGET := drm.o
export-objs := gamma_drv.o tdfx_drv.o r128_drv.o mga_drv.o i810_drv.o \
ffb_drv.o
list-multi := gamma.o tdfx.o r128.o mga.o i810.o ffb.o
module-list := gamma.o tdfx.o r128.o radeon.o ffb.o mga.o i810.o
export-objs := $(patsubst %.o,%_drv.o,$(module-list))
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o
i810-objs := i810_drv.o i810_dma.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o
# libs-objs are included in every module so that radical changes to the
# architecture of the DRM support library can be made at a later time.
#
# The downside is that each module is larger, and a system that uses
# more than one module (i.e., a dual-head system) will use more memory
# (but a system that uses exactly one module will use the same amount of
# memory).
#
# The upside is that if the DRM support library ever becomes insufficient
# for new families of cards, a new library can be implemented for those new
# cards without impacting the drivers for the old cards. This is significant,
# because testing architectural changes to old cards may be impossible, and
# may delay the implementation of a better architecture. We've traded slight
# memory waste (in the dual-head case) for greatly improved long-term
# maintainability.
#
# NOTE: lib-objs will be eliminated in future versions, thereby
# eliminating the need to compile the .o files into every module, but
# for now we still need them.
#
lib-objs := init.o memory.o proc.o auth.o context.o drawable.o bufs.o
lib-objs += lists.o lock.o ioctl.o fops.o vm.o dma.o ctxbitmap.o
ifeq ($(CONFIG_AGP),y)
lib-objs += agpsupport.o
else
ifeq ($(CONFIG_AGP),m)
lib-objs += agpsupport.o
endif
endif
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o tdfx_context.o
r128-objs := r128_drv.o r128_cce.o r128_context.o r128_bufs.o \
r128_state.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_context.o radeon_bufs.o \
radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o
mga-objs := mga_drv.o mga_dma.o mga_context.o mga_bufs.o \
mga_state.o
i810-objs := i810_drv.o i810_dma.o i810_context.o i810_bufs.o
obj-$(CONFIG_DRM_GAMMA) += gamma.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON) += radeon.o
obj-$(CONFIG_DRM_FFB) += ffb.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
# When linking into the kernel, link the library just once.
# If making modules, we include the library into each module
lib-objs-mod := $(patsubst %.o,%-mod.o,$(lib-objs))
ifdef MAKING_MODULES
lib = drmlib-mod.a
else
obj-y += drmlib.a
endif
obj-$(CONFIG_DRM_GAMMA) += gamma.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
obj-$(CONFIG_DRM_FFB) += ffb.o
include $(TOPDIR)/Rules.make
$(patsubst %.o,%.c,$(lib-objs-mod)):
@ln -sf $(subst -mod,,$@) $@
drmlib-mod.a: $(lib-objs-mod)
rm -f $@
$(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs-mod)
drmlib.a: $(lib-objs)
rm -f $@
$(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-objs)
gamma.o: $(gamma-objs) $(lib)
$(LD) -r -o $@ $(gamma-objs) $(lib)

View File

@ -266,6 +266,46 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
#if __HAVE_DMA
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
int i;
if (entry->seg_count) {
for (i = 0; i < entry->seg_count; i++) {
DRM(free_pages)(entry->seglist[i],
entry->page_order,
DRM_MEM_DMA);
}
DRM(free)(entry->seglist,
entry->seg_count *
sizeof(*entry->seglist),
DRM_MEM_SEGS);
entry->seg_count = 0;
}
if(entry->buf_count) {
for(i = 0; i < entry->buf_count; i++) {
if(entry->buflist[i].dev_private) {
DRM(free)(entry->buflist[i].dev_private,
entry->buflist[i].dev_priv_size,
DRM_MEM_BUFS);
}
}
DRM(free)(entry->buflist,
entry->buf_count *
sizeof(*entry->buflist),
DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
DRM(freelist_destroy)(&entry->freelist);
#endif
entry->buf_count = 0;
}
}
#if __REALLY_HAVE_AGP
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
@ -286,6 +326,7 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
int total;
int byte_count;
int i;
drm_buf_t **temp_buflist;
if ( !dma ) return -EINVAL;
@ -371,6 +412,11 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
DRM_MEM_BUFS );
if(!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
DRM(cleanup_buf_error)(entry);
}
memset( buf->dev_private, 0, buf->dev_priv_size );
#if __HAVE_DMA_HISTOGRAM
@ -389,11 +435,20 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
DRM_DEBUG( "byte_count: %d\n", byte_count );
dma->buflist = DRM(realloc)( dma->buflist,
temp_buflist = DRM(realloc)( dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
if(!temp_buflist) {
/* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->buflist = temp_buflist;
for ( i = 0 ; i < entry->buf_count ; i++ ) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
@ -446,6 +501,8 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
int i;
int byte_count;
int page_count;
unsigned long *temp_pagelist;
drm_buf_t **temp_buflist;
if ( !dma ) return -EINVAL;
@ -512,11 +569,24 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
}
memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
dma->pagelist = DRM(realloc)( dma->pagelist,
temp_pagelist = DRM(realloc)( dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES );
if(!temp_pagelist) {
DRM(free)( entry->buflist,
count * sizeof(*entry->buflist),
DRM_MEM_BUFS );
DRM(free)( entry->seglist,
count * sizeof(*entry->seglist),
DRM_MEM_SEGS );
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->pagelist = temp_pagelist;
DRM_DEBUG( "pagelist: %d entries\n",
dma->page_count + (count << page_order) );
@ -563,11 +633,20 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = DRM(realloc)( dma->buflist,
temp_buflist = DRM(realloc)( dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
if(!temp_buflist) {
/* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->buflist = temp_buflist;
for ( i = 0 ; i < entry->buf_count ; i++ ) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
@ -601,67 +680,68 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
drm_buf_t **temp_buflist;
if ( !dma ) return -EINVAL;
if ( !dma ) return -EINVAL;
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
sizeof(request) ) )
return -EFAULT;
count = request.count;
order = DRM(order)( request.size );
size = 1 << order;
count = request.count;
order = DRM(order)( request.size );
size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
alignment = (request.flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = request.agp_start;
byte_count = 0;
agp_offset = request.agp_start;
DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order );
DRM_DEBUG( "size: %d\n", size );
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
DRM_DEBUG( "alignment: %d\n", alignment );
DRM_DEBUG( "page_order: %d\n", page_order );
DRM_DEBUG( "total: %d\n", total );
DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order );
DRM_DEBUG( "size: %d\n", size );
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
DRM_DEBUG( "alignment: %d\n", alignment );
DRM_DEBUG( "page_order: %d\n", page_order );
DRM_DEBUG( "total: %d\n", total );
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
spin_lock( &dev->count_lock );
if ( dev->buf_use ) {
spin_unlock( &dev->count_lock );
return -EBUSY;
}
atomic_inc( &dev->buf_alloc );
spin_unlock( &dev->count_lock );
spin_lock( &dev->count_lock );
if ( dev->buf_use ) {
spin_unlock( &dev->count_lock );
return -EBUSY;
}
atomic_inc( &dev->buf_alloc );
spin_unlock( &dev->count_lock );
down( &dev->struct_sem );
entry = &dma->bufs[order];
if ( entry->buf_count ) {
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM; /* May only call once for each order */
}
down( &dev->struct_sem );
entry = &dma->bufs[order];
if ( entry->buf_count ) {
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
up( &dev->struct_sem );
@ -669,90 +749,108 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
return -EINVAL;
}
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
DRM_MEM_BUFS );
if ( !entry->buflist ) {
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
DRM_MEM_BUFS );
if ( !entry->buflist ) {
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
entry->buf_size = size;
entry->page_order = page_order;
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
offset = 0;
while ( entry->buf_count < count ) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
while ( entry->buf_count < count ) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->sg->handle);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head( &buf->dma_wait );
buf->pid = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->sg->handle);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head( &buf->dma_wait );
buf->pid = 0;
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
DRM_MEM_BUFS );
memset( buf->dev_private, 0, buf->dev_priv_size );
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
DRM_MEM_BUFS );
if(!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
#if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
memset( buf->dev_private, 0, buf->dev_priv_size );
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
# if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
# endif
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
DRM_DEBUG( "byte_count: %d\n", byte_count );
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = DRM(realloc)( dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
DRM_DEBUG( "byte_count: %d\n", byte_count );
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
temp_buflist = DRM(realloc)( dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
if(!temp_buflist) {
/* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->buflist = temp_buflist;
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
#if __HAVE_DMA_FREELIST
DRM(freelist_create)( &entry->freelist, entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
}
DRM(freelist_create)( &entry->freelist, entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
}
#endif
up( &dev->struct_sem );
up( &dev->struct_sem );
request.count = entry->buf_count;
request.size = size;
request.count = entry->buf_count;
request.size = size;
if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_SG;
dma->flags = _DRM_DMA_USE_SG;
atomic_dec( &dev->buf_alloc );
return 0;
atomic_dec( &dev->buf_alloc );
return 0;
}
#endif /* __HAVE_SG */

View File

@ -70,13 +70,20 @@ int DRM(ctxbitmap_next)( drm_device_t *dev )
if((bit+1) > dev->max_context) {
dev->max_context = (bit+1);
if(dev->context_sareas) {
dev->context_sareas = DRM(realloc)(
dev->context_sareas,
(dev->max_context - 1) *
sizeof(*dev->context_sareas),
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
drm_map_t **ctx_sareas;
ctx_sareas = DRM(realloc)(dev->context_sareas,
(dev->max_context - 1) *
sizeof(*dev->context_sareas),
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if(!ctx_sareas) {
clear_bit(bit, dev->ctx_bitmap);
up(&dev->struct_sem);
return -1;
}
dev->context_sareas = ctx_sareas;
dev->context_sareas[bit] = NULL;
} else {
/* max_context == 1 at this point */
@ -84,6 +91,11 @@ int DRM(ctxbitmap_next)( drm_device_t *dev )
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if(!dev->context_sareas) {
clear_bit(bit, dev->ctx_bitmap);
up(&dev->struct_sem);
return -1;
}
dev->context_sareas[bit] = NULL;
}
}

View File

@ -84,6 +84,9 @@
#ifndef __HAVE_SG
#define __HAVE_SG 0
#endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH 0
#endif
#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT()
@ -97,9 +100,47 @@
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
#ifndef DRIVER_FOPS
#if LINUX_VERSION_CODE >= 0x020400
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
owner: THIS_MODULE, \
open: DRM(open), \
flush: DRM(flush), \
release: DRM(release), \
ioctl: DRM(ioctl), \
mmap: DRM(mmap), \
read: DRM(read), \
fasync: DRM(fasync), \
poll: DRM(poll), \
}
#else
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
open: DRM(open), \
flush: DRM(flush), \
release: DRM(release), \
ioctl: DRM(ioctl), \
mmap: DRM(mmap), \
read: DRM(read), \
fasync: DRM(fasync), \
poll: DRM(poll), \
}
#endif
#endif
/*
* The default number of instances (minor numbers) to initialize.
@ -112,21 +153,7 @@ static drm_device_t *DRM(device);
static int *DRM(minor);
static int DRM(numdevs) = 0;
static struct file_operations DRM(fops) = {
#if LINUX_VERSION_CODE >= 0x020400
/* This started being used during 2.4.0-test */
owner: THIS_MODULE,
#endif
open: DRM(open),
flush: DRM(flush),
release: DRM(release),
ioctl: DRM(ioctl),
mmap: DRM(mmap),
read: DRM(read),
fasync: DRM(fasync),
poll: DRM(poll),
};
DRIVER_FOPS;
static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
@ -212,6 +239,7 @@ static int DRM(setup)( drm_device_t *dev )
{
int i;
DRIVER_PRESETUP();
atomic_set( &dev->ioctl_count, 0 );
atomic_set( &dev->vma_count, 0 );
dev->buf_use = 0;
@ -311,6 +339,7 @@ static int DRM(setup)( drm_device_t *dev )
* drm_select_queue fails between the time the interrupt is
* initialized and the time the queues are initialized.
*/
DRIVER_POSTSETUP();
return 0;
}
@ -320,7 +349,7 @@ static int DRM(takedown)( drm_device_t *dev )
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
struct list_head *list, *list_next;
drm_vma_entry_t *vma, *vma_next;
int i;
@ -388,7 +417,10 @@ static int DRM(takedown)( drm_device_t *dev )
}
if( dev->maplist ) {
list_for_each(list, &dev->maplist->head) {
for(list = dev->maplist->head.next;
list != &dev->maplist->head;
list = list_next) {
list_next = list->next;
r_list = (drm_map_list_t *)list;
map = r_list->map;
DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
@ -635,6 +667,7 @@ static void __exit drm_cleanup( void )
}
#endif
}
DRIVER_POSTCLEANUP();
kfree(DRM(minor));
kfree(DRM(device));
DRM(numdevs) = 0;
@ -978,6 +1011,12 @@ int DRM(lock)( struct inode *inode, struct file *filp,
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
DRIVER_DMA_QUIESCENT();
}
#endif
#if __HAVE_KERNEL_CTX_SWITCH
if ( dev->last_context != lock.context ) {
DRM(context_switch)(dev, dev->last_context,
lock.context);
}
#endif
}

View File

@ -47,6 +47,8 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev)
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
priv = DRM(alloc)(sizeof(*priv), DRM_MEM_FILES);
if(!priv) return -ENOMEM;
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->uid = current->euid;

View File

@ -76,23 +76,26 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_unique_t u;
if (dev->unique_len || dev->unique)
return -EBUSY;
if (dev->unique_len || dev->unique) return -EBUSY;
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
return -EFAULT;
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u))) return -EFAULT;
if (!u.unique_len || u.unique_len > 1024)
return -EINVAL;
if (!u.unique_len || u.unique_len > 1024) return -EINVAL;
dev->unique_len = u.unique_len;
dev->unique = DRM(alloc)(u.unique_len + 1, DRM_MEM_DRIVER);
if(!dev->unique) return -ENOMEM;
if (copy_from_user(dev->unique, u.unique, dev->unique_len))
return -EFAULT;
dev->unique[dev->unique_len] = '\0';
dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2,
DRM_MEM_DRIVER);
if(!dev->devname) {
DRM(free)(dev->devname, sizeof(*dev->devname), DRM_MEM_DRIVER);
return -ENOMEM;
}
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
#ifdef __alpha__

View File

@ -38,9 +38,12 @@ int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
{
if (bl->count) return -EINVAL;
bl->count = count;
bl->bufs = DRM(alloc)((bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
if(!bl->bufs) return -ENOMEM;
bl->count = count;
bl->rp = bl->bufs;
bl->wp = bl->bufs;
bl->end = &bl->bufs[bl->count+1];

View File

@ -93,6 +93,7 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
DRM(free)( entry, sizeof(*entry), DRM_MEM_SGLISTS );
return -ENOMEM;
}
memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
entry->virtual = vmalloc_32( pages << PAGE_SHIFT );
if ( !entry->virtual ) {

View File

@ -84,6 +84,7 @@ static int DRM(stub_getminor)(const char *name, struct file_operations *fops,
if (!DRM(stub_list)) {
DRM(stub_list) = DRM(alloc)(sizeof(*DRM(stub_list))
* DRM_STUB_MAXCARDS, DRM_MEM_STUB);
if(!DRM(stub_list)) return -1;
for (i = 0; i < DRM_STUB_MAXCARDS; i++) {
DRM(stub_list)[i].name = NULL;
DRM(stub_list)[i].fops = NULL;

View File

@ -126,7 +126,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_vma_entry_t *pt, *prev;
drm_vma_entry_t *pt, *prev, *next;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
@ -146,7 +146,8 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
#endif
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
#else
@ -159,6 +160,8 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
dev->vmalist = pt->next;
}
DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
} else {
prev = pt;
}
}
/* We were the only map that was found */
@ -365,6 +368,19 @@ int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
return 0;
}
#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS() (map->offset)
#endif
#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS() (dev->hose->dense_mem_base - \
dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS() 0
#endif
#endif
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
@ -389,10 +405,13 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
for performance, even if the list was a
bit longer. */
list_for_each(list, &dev->maplist->head) {
unsigned long off;
r_list = (drm_map_list_t *)list;
map = r_list->map;
if (!map) continue;
if (map->offset == VM_OFFSET(vma)) break;
off = DRIVER_GET_MAP_OFS();
if (off == VM_OFFSET(vma)) break;
}
if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
@ -433,10 +452,7 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
#endif
vma->vm_flags |= VM_IO; /* not in core dump */
}
#ifdef __alpha__
offset = dev->hose->dense_mem_base -
dev->hose->mem_space->start;
#endif
offset = DRIVER_GET_REG_OFS();
if (remap_page_range(vma->vm_start,
VM_OFFSET(vma) + offset,
vma->vm_end - vma->vm_start,

View File

@ -379,10 +379,9 @@ static void i810_kernel_lost_context(drm_device_t *dev)
if (ring->space < 0) ring->space += ring->Size;
}
static int i810_freelist_init(drm_device_t *dev)
static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
{
drm_device_dma_t *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
int my_idx = 24;
u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
int i;
@ -414,7 +413,6 @@ static int i810_dma_initialize(drm_device_t *dev,
{
struct list_head *list;
dev->dev_private = (void *) dev_priv;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
list_for_each(list, &dev->maplist->head) {
@ -426,9 +424,26 @@ static int i810_dma_initialize(drm_device_t *dev,
break;
}
}
if(!dev_priv->sarea_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find sarea!\n");
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
if(!dev_priv->mmio_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
if(!dev_priv->buffer_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find dma buffer map!\n");
return -EINVAL;
}
dev_priv->sarea_priv = (drm_i810_sarea_t *)
((u8 *)dev_priv->sarea_map->handle +
@ -445,15 +460,16 @@ static int i810_dma_initialize(drm_device_t *dev,
init->ring_start,
init->ring_size);
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
if (dev_priv->ring.virtual_start == NULL) {
dev->dev_private = (void *) dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->w = init->w;
dev_priv->h = init->h;
dev_priv->pitch = init->pitch;
@ -464,27 +480,30 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->back_di1 = init->back_offset | init->pitch_bits;
dev_priv->zi1 = init->depth_offset | init->pitch_bits;
/* Program Hardware Status Page */
dev_priv->hw_status_page = i810_alloc_page(dev);
memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
if(dev_priv->hw_status_page == 0UL) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);
I810_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page));
DRM_DEBUG("Enabled hardware status page\n");
/* Now we need to init our freelist */
if(i810_freelist_init(dev) != 0) {
if(i810_freelist_init(dev, dev_priv) != 0) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Not enough space in the status page for"
" the freelist\n");
return -ENOMEM;
}
dev->dev_private = (void *)dev_priv;
return 0;
}

View File

@ -41,8 +41,8 @@
#define DRIVER_DESC "Intel i810"
#define DRIVER_DATE "20010616"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \

View File

@ -292,10 +292,9 @@ static void mga_freelist_print( drm_device_t *dev )
}
#endif
static int mga_freelist_init( drm_device_t *dev )
static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
{
drm_device_dma_t *dma = dev->dma;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_buf_t *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_freelist_t *entry;
@ -458,7 +457,6 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
dev_priv = DRM(alloc)( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
if ( !dev_priv )
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
memset( dev_priv, 0, sizeof(drm_mga_private_t) );
@ -497,14 +495,63 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
break;
}
}
if(!dev_priv->sarea) {
DRM_ERROR( "failed to find sarea!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
if(!dev_priv->fb) {
DRM_ERROR( "failed to find framebuffer!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
if(!dev_priv->mmio) {
DRM_ERROR( "failed to find mmio region!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->status, init->status_offset );
if(!dev_priv->status) {
DRM_ERROR( "failed to find status page!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->warp, init->warp_offset );
if(!dev_priv->warp) {
DRM_ERROR( "failed to find warp microcode region!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->primary, init->primary_offset );
if(!dev_priv->primary) {
DRM_ERROR( "failed to find primary dma region!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
if(!dev_priv->buffers) {
DRM_ERROR( "failed to find dma buffer region!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -EINVAL;
}
dev_priv->sarea_priv =
(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
@@ -514,16 +561,30 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
DRM_IOREMAP( dev_priv->primary );
DRM_IOREMAP( dev_priv->buffers );
ret = mga_warp_install_microcode( dev );
if(!dev_priv->warp->handle ||
!dev_priv->primary->handle ||
!dev_priv->buffers->handle ) {
DRM_ERROR( "failed to ioremap agp regions!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -ENOMEM;
}
ret = mga_warp_install_microcode( dev_priv );
if ( ret < 0 ) {
DRM_ERROR( "failed to install WARP ucode!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return ret;
}
ret = mga_warp_init( dev );
ret = mga_warp_init( dev_priv );
if ( ret < 0 ) {
DRM_ERROR( "failed to init WARP engine!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return ret;
}
@@ -566,12 +627,16 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
dev_priv->sarea_priv->last_frame.head = 0;
dev_priv->sarea_priv->last_frame.wrap = 0;
if ( mga_freelist_init( dev ) < 0 ) {
if ( mga_freelist_init( dev, dev_priv ) < 0 ) {
DRM_ERROR( "could not initialize freelist\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return -ENOMEM;
}
/* Make dev_private visible to others. */
dev->dev_private = (void *)dev_priv;
return 0;
}


@@ -139,8 +139,8 @@ extern int mga_dma_blit( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
/* mga_warp.c */
extern int mga_warp_install_microcode( drm_device_t *dev );
extern int mga_warp_init( drm_device_t *dev );
extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv );
extern int mga_warp_init( drm_mga_private_t *dev_priv );
#define mga_flush_write_combine() mb()


@@ -160,11 +160,8 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
return 0;
}
int mga_warp_install_microcode( drm_device_t *dev )
int mga_warp_install_microcode( drm_mga_private_t *dev_priv )
{
drm_mga_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "%s\n", __FUNCTION__ );
switch ( dev_priv->chipset ) {
case MGA_CARD_TYPE_G400:
return mga_warp_install_g400_microcode( dev_priv );
@@ -177,11 +174,9 @@ int mga_warp_install_microcode( drm_device_t *dev )
#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
int mga_warp_init( drm_device_t *dev )
int mga_warp_init( drm_mga_private_t *dev_priv )
{
drm_mga_private_t *dev_priv = dev->dev_private;
u32 wmisc;
DRM_DEBUG( "%s\n", __FUNCTION__ );
/* FIXME: Get rid of these damned magic numbers...
*/
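
Because dev->dev_private is no longer set before these helpers run, mga_warp_install_microcode() and mga_warp_init() can no longer fetch the private structure from the device; the caller, which already holds the freshly allocated dev_priv, now passes it in directly (the same reason mga_freelist_init() and r128_cce_init_ring_buffer() grew a dev_priv argument). A small before/after sketch, using made-up my_* names rather than the real MGA types:

#include <stdio.h>
#include <stdlib.h>

struct my_priv { int chipset; };
struct my_dev  { void *dev_private; };

/* Old style: dig dev_priv out of the device.  During the restructured
 * init sequence dev->dev_private is still NULL here, so this would fail. */
static int my_warp_init_old(struct my_dev *dev)
{
	struct my_priv *dev_priv = dev->dev_private;

	return dev_priv ? dev_priv->chipset : -1;
}

/* New style: the caller passes the private structure it just allocated. */
static int my_warp_init_new(struct my_priv *dev_priv)
{
	return dev_priv->chipset;
}

int main(void)
{
	struct my_dev dev = { NULL };                 /* not yet published */
	struct my_priv *priv = calloc(1, sizeof(*priv));

	if (!priv)
		return 1;
	priv->chipset = 400;
	printf("old: %d\n", my_warp_init_old(&dev));  /* -1, pointer not visible */
	printf("new: %d\n", my_warp_init_new(priv));  /* 400, works during init  */
	free(priv);
	return 0;
}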


@@ -314,9 +314,9 @@ static int r128_do_engine_reset( drm_device_t *dev )
return 0;
}
static void r128_cce_init_ring_buffer( drm_device_t *dev )
static void r128_cce_init_ring_buffer( drm_device_t *dev,
drm_r128_private_t *dev_priv )
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 ring_start;
u32 tmp;
@@ -383,17 +383,15 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
dev_priv = DRM(alloc)( sizeof(drm_r128_private_t), DRM_MEM_DRIVER );
if ( dev_priv == NULL )
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
memset( dev_priv, 0, sizeof(drm_r128_private_t) );
dev_priv->is_pci = init->is_pci;
if ( dev_priv->is_pci && !dev->sg ) {
DRM_DEBUG( "PCI GART memory not allocated!\n" );
DRM_ERROR( "PCI GART memory not allocated!\n" );
DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
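
The DRM_DEBUG-to-DRM_ERROR change above is part of the same hardening: the message fires on a path that aborts initialization, and DRM_DEBUG output is normally suppressed while DRM_ERROR always reaches the kernel log. The fragment below sketches the distinction with plain printk() levels; my_check_gart() is an invented example, and the include reflects current kernels rather than the 2.4 tree this patch targets.

#include <linux/kernel.h>
#include <linux/errno.h>

/* Fatal configuration problems should be visible without a debug build. */
static int my_check_gart(void *gart)
{
	if (!gart) {
		printk(KERN_ERR "PCI GART memory not allocated!\n"); /* always logged */
		return -EINVAL;
	}
	printk(KERN_DEBUG "GART table at %p\n", gart);               /* low priority */
	return 0;
}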
@@ -401,8 +399,8 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
if ( dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT ) {
DRM_DEBUG( "TIMEOUT problem!\n" );
DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
@@ -421,8 +419,8 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
( init->cce_mode != R128_PM4_64BM_128INDBM ) &&
( init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM ) ) {
DRM_DEBUG( "Bad cce_mode!\n" );
DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
@@ -494,16 +492,58 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
break;
}
}
if(!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
if(!dev_priv->fb) {
DRM_ERROR("could not find framebuffer!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
if(!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->cce_ring, init->ring_offset );
if(!dev_priv->cce_ring) {
DRM_ERROR("could not find cce ring region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
if(!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
if(!dev_priv->buffers) {
DRM_ERROR("could not find dma buffer region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
if ( !dev_priv->is_pci ) {
DRM_FIND_MAP( dev_priv->agp_textures,
init->agp_textures_offset );
if(!dev_priv->agp_textures) {
DRM_ERROR("could not find agp texture region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -EINVAL;
}
}
dev_priv->sarea_priv =
@@ -514,6 +554,14 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
DRM_IOREMAP( dev_priv->cce_ring );
DRM_IOREMAP( dev_priv->ring_rptr );
DRM_IOREMAP( dev_priv->buffers );
if(!dev_priv->cce_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev_priv->buffers->handle) {
DRM_ERROR("Could not ioremap agp regions!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -ENOMEM;
}
} else {
dev_priv->cce_ring->handle =
(void *)dev_priv->cce_ring->offset;
@@ -553,19 +601,20 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
if ( dev_priv->is_pci ) {
dev_priv->phys_pci_gart = DRM(ati_pcigart_init)( dev );
if ( !dev_priv->phys_pci_gart ) {
DRM_DEBUG( "failed to init PCI GART!\n" );
DRM_ERROR( "failed to init PCI GART!\n" );
DRM(free)( dev_priv, sizeof(*dev_priv),
DRM_MEM_DRIVER );
dev->dev_private = NULL;
return -EINVAL;
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return -ENOMEM;
}
R128_WRITE( R128_PCI_GART_PAGE,
virt_to_bus( (void *)dev_priv->phys_pci_gart ) );
}
r128_cce_init_ring_buffer( dev );
r128_cce_init_ring_buffer( dev, dev_priv );
r128_cce_load_microcode( dev_priv );
dev->dev_private = (void *)dev_priv;
r128_do_engine_reset( dev );
return 0;
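
The new handle checks after the DRM_IOREMAP() calls close another gap: ioremap of the AGP regions can fail, and the old code went on to use the NULL handles. A condensed sketch of the rule, checking every mapping and unwinding whichever ones did succeed; my_map_rings() and its fields are illustrative only, and the include paths are those of current kernels:

#include <linux/io.h>
#include <linux/errno.h>

struct my_rings { void __iomem *ring; void __iomem *rptr; };

static int my_map_rings(struct my_rings *r, unsigned long ring_phys,
                        unsigned long rptr_phys, unsigned long len)
{
	r->ring = ioremap(ring_phys, len);
	r->rptr = ioremap(rptr_phys, len);

	if (!r->ring || !r->rptr) {
		/* Unwind whichever mapping did succeed before bailing out. */
		if (r->ring)
			iounmap(r->ring);
		if (r->rptr)
			iounmap(r->rptr);
		return -ENOMEM;
	}
	return 0;
}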


@@ -41,9 +41,9 @@
#define DRIVER_DESC "ATI Rage 128"
#define DRIVER_DATE "20010405"
#define DRIVER_MAJOR 3
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 6
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \


@@ -910,7 +910,7 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev,
return -EFAULT;
}
buffer = kmalloc( depth->n * sizeof(u32), 0 );
buffer = kmalloc( depth->n * sizeof(u32), GFP_KERNEL );
if ( buffer == NULL )
return -ENOMEM;
if ( copy_from_user( buffer, depth->buffer,
@@ -920,7 +920,7 @@ static int r128_cce_dispatch_write_span( drm_device_t *dev,
}
if ( depth->mask ) {
mask = kmalloc( depth->n * sizeof(u8), 0 );
mask = kmalloc( depth->n * sizeof(u8), GFP_KERNEL );
if ( mask == NULL ) {
kfree( buffer );
return -ENOMEM;
@@ -997,11 +997,11 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
count = depth->n;
x = kmalloc( count * sizeof(*x), 0 );
x = kmalloc( count * sizeof(*x), GFP_KERNEL );
if ( x == NULL ) {
return -ENOMEM;
}
y = kmalloc( count * sizeof(*y), 0 );
y = kmalloc( count * sizeof(*y), GFP_KERNEL );
if ( y == NULL ) {
kfree( x );
return -ENOMEM;
@@ -1017,7 +1017,7 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
return -EFAULT;
}
buffer = kmalloc( depth->n * sizeof(u32), 0 );
buffer = kmalloc( depth->n * sizeof(u32), GFP_KERNEL );
if ( buffer == NULL ) {
kfree( x );
kfree( y );
@@ -1032,7 +1032,7 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
}
if ( depth->mask ) {
mask = kmalloc( depth->n * sizeof(u8), 0 );
mask = kmalloc( depth->n * sizeof(u8), GFP_KERNEL );
if ( mask == NULL ) {
kfree( x );
kfree( y );
@@ -1157,11 +1157,11 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
count = dev_priv->depth_pitch;
}
x = kmalloc( count * sizeof(*x), 0 );
x = kmalloc( count * sizeof(*x), GFP_KERNEL );
if ( x == NULL ) {
return -ENOMEM;
}
y = kmalloc( count * sizeof(*y), 0 );
y = kmalloc( count * sizeof(*y), GFP_KERNEL );
if ( y == NULL ) {
kfree( x );
return -ENOMEM;
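
The r128_state.c hunks above replace a gfp_mask of 0 with GFP_KERNEL: a zero mask gives the allocator no permission to sleep or reclaim, so allocations are needlessly likely to fail, whereas GFP_KERNEL is the normal mask for ioctl/process context where blocking is fine. A short sketch of the corrected pattern; depth_copy_span() is an invented name and the include paths are those of current kernels, not the 2.4 tree this patch targets:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Copy n 32-bit depth values from user space into a kernel buffer. */
static int depth_copy_span(const u32 __user *src, int n, u32 **out)
{
	u32 *buf = kmalloc(n * sizeof(*buf), GFP_KERNEL);   /* may sleep, fine here */

	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, src, n * sizeof(*buf))) {
		kfree(buf);
		return -EFAULT;
	}
	*out = buf;
	return 0;
}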


@@ -1,298 +0,0 @@
/* radeon_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Kevin E. Martin <martin@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
#define __NO_VERSION__
#include <linux/config.h>
#include "drmP.h"
#include "radeon_drv.h"
#include "linux/un.h"
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
int radeon_addbufs_agp(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
if (!dma) return -EINVAL;
if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
return -EFAULT;
count = request.count;
order = drm_order(request.size);
size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request.agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
if (dev->queue_count) return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
down(&dev->struct_sem);
entry = &dma->bufs[order];
if (entry->buf_count) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
for (offset = 0;
entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
buf->pid = 0;
buf->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
buf->dev_private = drm_alloc(sizeof(drm_radeon_buf_priv_t),
DRM_MEM_BUFS);
memset(buf->dev_private, 0, buf->dev_priv_size);
#if DRM_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
byte_count += PAGE_SIZE << page_order;
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
DRM_DEBUG("byte_count: %d\n", byte_count);
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
up(&dev->struct_sem);
request.count = entry->buf_count;
request.size = size;
if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request)))
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP;
atomic_dec(&dev->buf_alloc);
return 0;
}
#endif
int radeon_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_buf_desc_t request;
if (!dev_priv || dev_priv->is_pci) return -EINVAL;
if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
return -EFAULT;
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
if (request.flags & _DRM_AGP_BUFFER)
return radeon_addbufs_agp(inode, filp, cmd, arg);
else
#endif
return -EINVAL;
}
int radeon_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
unsigned long address;
drm_buf_map_t request;
int i;
if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL;
DRM_DEBUG("\n");
spin_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
if (copy_from_user(&request, (drm_buf_map_t *)arg, sizeof(request)))
return -EFAULT;
if (request.count >= dma->buf_count) {
if (dma->flags & _DRM_DMA_USE_AGP) {
drm_map_t *map;
map = dev_priv->buffers;
if (!map) {
retcode = -EINVAL;
goto done;
}
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, map->size,
PROT_READ|PROT_WRITE,
MAP_SHARED,
(unsigned long)map->offset);
up(&current->mm->mmap_sem);
} else {
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, dma->byte_count,
PROT_READ|PROT_WRITE, MAP_SHARED, 0);
up(&current->mm->mmap_sem);
}
if (virtual > -1024UL) {
/* Real error */
retcode = (signed long)virtual;
goto done;
}
request.virtual = (void *)virtual;
for (i = 0; i < dma->buf_count; i++) {
if (copy_to_user(&request.list[i].idx,
&dma->buflist[i]->idx,
sizeof(request.list[0].idx))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].total,
&dma->buflist[i]->total,
sizeof(request.list[0].total))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].used,
&zero,
sizeof(zero))) {
retcode = -EFAULT;
goto done;
}
address = virtual + dma->buflist[i]->offset;
if (copy_to_user(&request.list[i].address,
&address,
sizeof(address))) {
retcode = -EFAULT;
goto done;
}
}
}
done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
if (copy_to_user((drm_buf_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return retcode;
}


@@ -574,12 +574,11 @@ static int radeon_do_engine_reset( drm_device_t *dev )
return 0;
}
static void radeon_cp_init_ring_buffer( drm_device_t *dev )
static void radeon_cp_init_ring_buffer( drm_device_t *dev,
drm_radeon_private_t *dev_priv )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
u32 ring_start, cur_read_ptr;
u32 tmp;
DRM_DEBUG( "%s\n", __FUNCTION__ );
/* Initialize the memory controller */
RADEON_WRITE( RADEON_MC_FB_LOCATION,
@@ -659,7 +658,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv = DRM(alloc)( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER );
if ( dev_priv == NULL )
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
memset( dev_priv, 0, sizeof(drm_radeon_private_t) );
@@ -670,16 +668,16 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
*/
if ( dev_priv->is_pci ) {
DRM_ERROR( "PCI GART not yet supported for Radeon!\n" );
DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
#endif
if ( dev_priv->is_pci && !dev->sg ) {
DRM_ERROR( "PCI GART memory not allocated!\n" );
DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
@@ -687,8 +685,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
if ( dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT ) {
DRM_DEBUG( "TIMEOUT problem!\n" );
DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
@@ -705,8 +703,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
if ( ( init->cp_mode != RADEON_CSQ_PRIBM_INDDIS ) &&
( init->cp_mode != RADEON_CSQ_PRIBM_INDBM ) ) {
DRM_DEBUG( "BAD cp_mode (%x)!\n", init->cp_mode );
DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
@@ -782,16 +780,58 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
break;
}
}
if(!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
if(!dev_priv->fb) {
DRM_ERROR("could not find framebuffer!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
if(!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->cp_ring, init->ring_offset );
if(!dev_priv->cp_ring) {
DRM_ERROR("could not find cp ring region!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
if(!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
if(!dev_priv->buffers) {
DRM_ERROR("could not find dma buffer region!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
if ( !dev_priv->is_pci ) {
DRM_FIND_MAP( dev_priv->agp_textures,
init->agp_textures_offset );
if(!dev_priv->agp_textures) {
DRM_ERROR("could not find agp texture region!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
}
dev_priv->sarea_priv =
@@ -802,6 +842,14 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
DRM_IOREMAP( dev_priv->cp_ring );
DRM_IOREMAP( dev_priv->ring_rptr );
DRM_IOREMAP( dev_priv->buffers );
if(!dev_priv->cp_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev_priv->buffers->handle) {
DRM_ERROR("could not find ioremap agp regions!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -EINVAL;
}
} else {
dev_priv->cp_ring->handle =
(void *)dev_priv->cp_ring->offset;
@@ -884,10 +932,9 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv->phys_pci_gart = DRM(ati_pcigart_init)( dev );
if ( !dev_priv->phys_pci_gart ) {
DRM_ERROR( "failed to init PCI GART!\n" );
DRM(free)( dev_priv, sizeof(*dev_priv),
DRM_MEM_DRIVER );
dev->dev_private = NULL;
return -EINVAL;
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return -ENOMEM;
}
/* Turn on PCI GART
*/
@@ -919,13 +966,16 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
}
radeon_cp_load_microcode( dev_priv );
radeon_cp_init_ring_buffer( dev );
radeon_do_engine_reset( dev );
radeon_cp_init_ring_buffer( dev, dev_priv );
#if ROTATE_BUFS
dev_priv->last_buf = 0;
#endif
dev->dev_private = (void *)dev_priv;
radeon_do_engine_reset( dev );
return 0;
}