Move things around to reduce public symbols and even out files. Switch to
get_order from drm_order.

parent 3aef3841d0
commit 1c0a437fa2
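For reference, and not part of the commit itself: drm_order(), removed from drm_bufs.c below, returns the exponent of the smallest power of two greater than or equal to its argument, and its callers now use the kernel's generic get_order() helper instead (for example, order = get_order( request.size ); in the drm_addbufs_* paths). A minimal sketch of the retired computation, mirroring the body deleted below:

/* Sketch of the removed drm_order(): ceil(log2(size)) for a byte count. */
static int drm_order_sketch(unsigned long size)
{
	int order;
	unsigned long tmp;

	/* order = floor(log2(size)): count how often size can be halved. */
	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	/* Round up when size is not an exact power of two. */
	if (size & (size - 1))
		++order;

	return order;
}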
@@ -740,28 +740,24 @@ extern void drm_parse_options( char *s );
extern int drm_cpu_valid( void );

/* Driver support (drm_drv.h) */
extern int drm_fb_loaded;
extern int __devinit drm_init(struct pci_driver *driver, struct pci_device_id* pciidlist,
struct drm_driver_fn *driver_fn);
extern void __exit drm_exit (struct pci_driver *driver);
extern void __exit drm_cleanup_pci(struct pci_dev *pdev);
extern int drm_version(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_open(struct inode *inode, struct file *filp);
extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev,
const struct pci_device_id *ent, struct drm_driver_fn *driver_fn);
extern int drm_fb_loaded;
extern int drm_takedown( drm_device_t *dev );

/* Device support (drm_fops.h) */
extern int drm_open(struct inode *inode, struct file *filp);
extern int drm_stub_open(struct inode *inode, struct file *filp);
extern int drm_open_helper(struct inode *inode, struct file *filp,
drm_device_t *dev);
extern int drm_fasync(int fd, struct file *filp, int on);
extern int drm_release(struct inode *inode, struct file *filp);

/* Mapping support (drm_vm.h) */
extern void drm_vm_open(struct vm_area_struct *vma);
@@ -802,6 +798,8 @@ extern int drm_getstats(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_setversion(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_noop(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);

/* Context IOCTL support (drm_context.h) */
extern int drm_resctx( struct inode *inode, struct file *filp,
@@ -847,11 +845,11 @@ extern int drm_getmagic(struct inode *inode, struct file *filp,
extern int drm_authmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);

/* Placeholder for ioctls past */
extern int drm_noop(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);

/* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lock_take(__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_lock_transfer(drm_device_t *dev,
@@ -863,7 +861,6 @@ extern int drm_lock_free(drm_device_t *dev,
extern int drm_notifier(void *priv);

/* Buffer management support (drm_bufs.h) */
extern int drm_order( unsigned long size );
extern int drm_addmap( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int drm_rmmap( struct inode *inode, struct file *filp,
@@ -938,7 +935,6 @@ extern unsigned int cards_limit;
extern drm_minor_t *drm_minors;
extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern struct file_operations drm_stub_fops;

/* Proc support (drm_proc.h) */
extern int drm_proc_init(drm_device_t *dev,
@@ -36,30 +36,6 @@
#include <linux/vmalloc.h>
#include "drmP.h"

/**
* Compute size order. Returns the exponent of the smaller power of two which
* is greater or equal to given number.
*
* \param size size.
* \return order.
*
* \todo Can be made faster.
*/
int drm_order( unsigned long size )
{
int order;
unsigned long tmp;

for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
;

if (size & (size - 1))
++order;

return order;
}
EXPORT_SYMBOL(drm_order);

/**
* Adjusts the memory offset to its absolute value according to the mapping
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
@@ -227,7 +203,7 @@ int drm_addmap( struct inode *inode, struct file *filp,
case _DRM_SHM:
map->handle = vmalloc_32(map->size);
DRM_DEBUG( "%lu %d %p\n",
map->size, drm_order( map->size ), map->handle );
map->size, get_order( map->size ), map->handle );
if ( !map->handle ) {
drm_free( map, sizeof(*map), DRM_MEM_MAPS );
return -ENOMEM;
@@ -460,7 +436,7 @@ int drm_addbufs_agp( struct inode *inode, struct file *filp,
return -EFAULT;

count = request.count;
order = drm_order( request.size );
order = get_order( request.size );
size = 1 << order;

alignment = (request.flags & _DRM_PAGE_ALIGN)
@@ -628,7 +604,7 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp,
return -EFAULT;

count = request.count;
order = drm_order( request.size );
order = get_order( request.size );
size = 1 << order;

DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
@@ -858,7 +834,7 @@ int drm_addbufs_sg( struct inode *inode, struct file *filp,
return -EFAULT;

count = request.count;
order = drm_order( request.size );
order = get_order( request.size );
size = 1 << order;

alignment = (request.flags & _DRM_PAGE_ALIGN)
@@ -1158,7 +1134,7 @@ int drm_markbufs( struct inode *inode, struct file *filp,

DRM_DEBUG( "%d, %d, %d\n",
request.size, request.low_mark, request.high_mark );
order = drm_order( request.size );
order = get_order( request.size );
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
entry = &dma->bufs[order];
@@ -142,82 +142,6 @@ drm_ioctl_desc_t drm_ioctls[] = {
|
|||
|
||||
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls )
|
||||
|
||||
static int drm_setup( drm_device_t *dev )
|
||||
{
|
||||
int i;
|
||||
|
||||
if (dev->fn_tbl->presetup)
|
||||
dev->fn_tbl->presetup(dev);
|
||||
|
||||
atomic_set( &dev->ioctl_count, 0 );
|
||||
atomic_set( &dev->vma_count, 0 );
|
||||
dev->buf_use = 0;
|
||||
atomic_set( &dev->buf_alloc, 0 );
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
{
|
||||
i = drm_dma_setup( dev );
|
||||
if ( i < 0 )
|
||||
return i;
|
||||
}
|
||||
|
||||
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
|
||||
atomic_set( &dev->counts[i], 0 );
|
||||
|
||||
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
|
||||
dev->magiclist[i].head = NULL;
|
||||
dev->magiclist[i].tail = NULL;
|
||||
}
|
||||
|
||||
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
|
||||
DRM_MEM_CTXLIST);
|
||||
if(dev->ctxlist == NULL) return -ENOMEM;
|
||||
memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
|
||||
INIT_LIST_HEAD(&dev->ctxlist->head);
|
||||
|
||||
dev->vmalist = NULL;
|
||||
dev->sigdata.lock = dev->lock.hw_lock = NULL;
|
||||
init_waitqueue_head( &dev->lock.lock_queue );
|
||||
dev->queue_count = 0;
|
||||
dev->queue_reserved = 0;
|
||||
dev->queue_slots = 0;
|
||||
dev->queuelist = NULL;
|
||||
dev->irq_enabled = 0;
|
||||
dev->context_flag = 0;
|
||||
dev->interrupt_flag = 0;
|
||||
dev->dma_flag = 0;
|
||||
dev->last_context = 0;
|
||||
dev->last_switch = 0;
|
||||
dev->last_checked = 0;
|
||||
init_waitqueue_head( &dev->context_wait );
|
||||
dev->if_version = 0;
|
||||
|
||||
dev->ctx_start = 0;
|
||||
dev->lck_start = 0;
|
||||
|
||||
dev->buf_rp = dev->buf;
|
||||
dev->buf_wp = dev->buf;
|
||||
dev->buf_end = dev->buf + DRM_BSZ;
|
||||
dev->buf_async = NULL;
|
||||
init_waitqueue_head( &dev->buf_readers );
|
||||
init_waitqueue_head( &dev->buf_writers );
|
||||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
/*
|
||||
* The kernel's context could be created here, but is now created
|
||||
* in drm_dma_enqueue. This is more resource-efficient for
|
||||
* hardware that does not do DMA, but may mean that
|
||||
* drm_select_queue fails between the time the interrupt is
|
||||
* initialized and the time the queues are initialized.
|
||||
*/
|
||||
if (dev->fn_tbl->postsetup)
|
||||
dev->fn_tbl->postsetup(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Take down the DRM device.
|
||||
*
|
||||
|
@@ -227,7 +151,7 @@ static int drm_setup( drm_device_t *dev )
*
* \sa drm_device and setup().
*/
static int drm_takedown( drm_device_t *dev )
int drm_takedown( drm_device_t *dev )
{
drm_magic_entry_t *pt, *next;
drm_map_t *map;
@@ -362,87 +286,6 @@ static int drm_takedown( drm_device_t *dev )
|
|||
return 0;
|
||||
}
|
||||
|
||||
int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver_fn *driver_fn)
|
||||
{
|
||||
int retcode;
|
||||
|
||||
dev->count_lock = SPIN_LOCK_UNLOCKED;
|
||||
init_timer( &dev->timer );
|
||||
sema_init( &dev->struct_sem, 1 );
|
||||
sema_init( &dev->ctxlist_sem, 1 );
|
||||
|
||||
dev->name = DRIVER_NAME;
|
||||
dev->pdev = pdev;
|
||||
|
||||
#ifdef __alpha__
|
||||
dev->hose = pdev->sysdata;
|
||||
dev->pci_domain = dev->hose->bus->number;
|
||||
#else
|
||||
dev->pci_domain = 0;
|
||||
#endif
|
||||
dev->pci_bus = pdev->bus->number;
|
||||
dev->pci_slot = PCI_SLOT(pdev->devfn);
|
||||
dev->pci_func = PCI_FUNC(pdev->devfn);
|
||||
dev->irq = pdev->irq;
|
||||
|
||||
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
|
||||
if(dev->maplist == NULL) return -ENOMEM;
|
||||
INIT_LIST_HEAD(&dev->maplist->head);
|
||||
|
||||
/* the DRM has 6 counters */
|
||||
dev->counters = 6;
|
||||
dev->types[0] = _DRM_STAT_LOCK;
|
||||
dev->types[1] = _DRM_STAT_OPENS;
|
||||
dev->types[2] = _DRM_STAT_CLOSES;
|
||||
dev->types[3] = _DRM_STAT_IOCTLS;
|
||||
dev->types[4] = _DRM_STAT_LOCKS;
|
||||
dev->types[5] = _DRM_STAT_UNLOCKS;
|
||||
|
||||
dev->fn_tbl = driver_fn;
|
||||
|
||||
if (dev->fn_tbl->preinit)
|
||||
if ((retcode = dev->fn_tbl->preinit(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
|
||||
if (drm_core_has_AGP(dev)) {
|
||||
dev->agp = drm_agp_init();
|
||||
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
|
||||
DRM_ERROR( "Cannot initialize the agpgart module.\n" );
|
||||
retcode = -EINVAL;
|
||||
goto error_out_unreg;
|
||||
}
|
||||
|
||||
|
||||
if (drm_core_has_MTRR(dev)) {
|
||||
if (dev->agp)
|
||||
dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
|
||||
dev->agp->agp_info.aper_size*1024*1024,
|
||||
MTRR_TYPE_WRCOMB,
|
||||
1 );
|
||||
}
|
||||
}
|
||||
|
||||
retcode = drm_ctxbitmap_init( dev );
|
||||
if( retcode ) {
|
||||
DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
|
||||
goto error_out_unreg;
|
||||
}
|
||||
|
||||
dev->device = MKDEV(DRM_MAJOR, dev->minor );
|
||||
DRM_DEBUG("driver_fn->postinit %p\n", driver_fn->postinit);
|
||||
|
||||
/* postinit is a required function to display the signon banner */
|
||||
/* drivers add secondary heads here if needed */
|
||||
if ((retcode = dev->fn_tbl->postinit(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
|
||||
return 0;
|
||||
|
||||
error_out_unreg:
|
||||
drm_takedown(dev);
|
||||
return retcode;
|
||||
}
|
||||
|
||||
void __exit drm_cleanup_pci(struct pci_dev *pdev)
|
||||
{
|
||||
drm_device_t *dev = pci_get_drvdata(pdev);
|
||||
|
@@ -630,6 +473,12 @@ void __exit drm_exit (struct pci_driver *driver)
}
EXPORT_SYMBOL(drm_exit);

/** File operations structure */
static struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
.open = drm_stub_open
};

static int __init drm_core_init(void)
{
int ret = -ENOMEM;
@@ -725,210 +574,6 @@ int drm_version( struct inode *inode, struct file *filp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Open file.
|
||||
*
|
||||
* \param inode device inode
|
||||
* \param filp file pointer.
|
||||
* \return zero on success or a negative number on failure.
|
||||
*
|
||||
* Searches the DRM device with the same minor number, calls open_helper(), and
|
||||
* increments the device open count. If the open count was previous at zero,
|
||||
* i.e., it's the first that the device is open, then calls setup().
|
||||
*/
|
||||
int drm_open( struct inode *inode, struct file *filp )
|
||||
{
|
||||
drm_device_t *dev = NULL;
|
||||
int minor = iminor(inode);
|
||||
int retcode = 0;
|
||||
|
||||
if (!((minor >= 0) && (minor < cards_limit)))
|
||||
return -ENODEV;
|
||||
|
||||
dev = drm_minors[minor].dev;
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
retcode = drm_open_helper( inode, filp, dev );
|
||||
if ( !retcode ) {
|
||||
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( !dev->open_count++ ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return drm_setup( dev );
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
}
|
||||
|
||||
return retcode;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_open);
|
||||
|
||||
/**
|
||||
* Release file.
|
||||
*
|
||||
* \param inode device inode
|
||||
* \param filp file pointer.
|
||||
* \return zero on success or a negative number on failure.
|
||||
*
|
||||
* If the hardware lock is held then free it, and take it again for the kernel
|
||||
* context since it's necessary to reclaim buffers. Unlink the file private
|
||||
* data from its list and free it. Decreases the open count and if it reaches
|
||||
* zero calls takedown().
|
||||
*/
|
||||
int drm_release( struct inode *inode, struct file *filp )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev;
|
||||
int retcode = 0;
|
||||
|
||||
lock_kernel();
|
||||
dev = priv->dev;
|
||||
|
||||
DRM_DEBUG( "open_count = %d\n", dev->open_count );
|
||||
|
||||
if (dev->fn_tbl->prerelease)
|
||||
dev->fn_tbl->prerelease(dev, filp);
|
||||
|
||||
/* ========================================================
|
||||
* Begin inline drm_release
|
||||
*/
|
||||
|
||||
DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
|
||||
current->pid, (long)old_encode_dev(dev->device), dev->open_count );
|
||||
|
||||
if ( priv->lock_count && dev->lock.hw_lock &&
|
||||
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
|
||||
dev->lock.filp == filp ) {
|
||||
DRM_DEBUG( "File %p released, freeing lock for context %d\n",
|
||||
filp,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
|
||||
if (dev->fn_tbl->release)
|
||||
dev->fn_tbl->release(dev, filp);
|
||||
|
||||
drm_lock_free( dev, &dev->lock.hw_lock->lock,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
|
||||
/* FIXME: may require heavy-handed reset of
|
||||
hardware at this point, possibly
|
||||
processed via a callback to the X
|
||||
server. */
|
||||
}
|
||||
else if ( dev->fn_tbl->release && priv->lock_count && dev->lock.hw_lock ) {
|
||||
/* The lock is required to reclaim buffers */
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
if ( !dev->lock.hw_lock ) {
|
||||
/* Device has been unregistered */
|
||||
retcode = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( drm_lock_take( &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
dev->lock.filp = filp;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
|
||||
break; /* Got lock */
|
||||
}
|
||||
/* Contention */
|
||||
schedule();
|
||||
if ( signal_pending( current ) ) {
|
||||
retcode = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
if( !retcode ) {
|
||||
if (dev->fn_tbl->release)
|
||||
dev->fn_tbl->release(dev, filp);
|
||||
drm_lock_free( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
}
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
{
|
||||
dev->fn_tbl->reclaim_buffers(filp);
|
||||
}
|
||||
|
||||
drm_fasync( -1, filp, 0 );
|
||||
|
||||
down( &dev->ctxlist_sem );
|
||||
if ( !list_empty( &dev->ctxlist->head ) ) {
|
||||
drm_ctx_list_t *pos, *n;
|
||||
|
||||
list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
|
||||
if ( pos->tag == priv &&
|
||||
pos->handle != DRM_KERNEL_CONTEXT ) {
|
||||
if (dev->fn_tbl->context_dtor)
|
||||
dev->fn_tbl->context_dtor(dev, pos->handle);
|
||||
|
||||
drm_ctxbitmap_free( dev, pos->handle );
|
||||
|
||||
list_del( &pos->head );
|
||||
drm_free( pos, sizeof(*pos), DRM_MEM_CTXLIST );
|
||||
--dev->ctx_count;
|
||||
}
|
||||
}
|
||||
}
|
||||
up( &dev->ctxlist_sem );
|
||||
|
||||
down( &dev->struct_sem );
|
||||
if ( priv->remove_auth_on_close == 1 ) {
|
||||
drm_file_t *temp = dev->file_first;
|
||||
while ( temp ) {
|
||||
temp->authenticated = 0;
|
||||
temp = temp->next;
|
||||
}
|
||||
}
|
||||
if ( priv->prev ) {
|
||||
priv->prev->next = priv->next;
|
||||
} else {
|
||||
dev->file_first = priv->next;
|
||||
}
|
||||
if ( priv->next ) {
|
||||
priv->next->prev = priv->prev;
|
||||
} else {
|
||||
dev->file_last = priv->prev;
|
||||
}
|
||||
up( &dev->struct_sem );
|
||||
|
||||
if (dev->fn_tbl->free_filp_priv)
|
||||
dev->fn_tbl->free_filp_priv( dev, priv );
|
||||
drm_free( priv, sizeof(*priv), DRM_MEM_FILES );
|
||||
|
||||
/* ========================================================
|
||||
* End inline drm_release
|
||||
*/
|
||||
|
||||
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( !--dev->open_count ) {
|
||||
if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
|
||||
DRM_ERROR( "Device busy: %d %d\n",
|
||||
atomic_read( &dev->ioctl_count ),
|
||||
dev->blocked );
|
||||
spin_unlock( &dev->count_lock );
|
||||
unlock_kernel();
|
||||
return -EBUSY;
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
unlock_kernel();
|
||||
return drm_takedown( dev );
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
unlock_kernel();
|
||||
|
||||
return retcode;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_release);
|
||||
|
||||
/**
|
||||
* Called whenever a process performs an ioctl on /dev/drm.
|
||||
*
|
||||
|
@@ -986,141 +631,3 @@ err_i1:
|
|||
}
|
||||
EXPORT_SYMBOL(drm_ioctl);
|
||||
|
||||
/**
|
||||
* Lock ioctl.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
* \param cmd command.
|
||||
* \param arg user argument, pointing to a drm_lock structure.
|
||||
* \return zero on success or negative number on failure.
|
||||
*
|
||||
* Add the current task to the lock wait queue, and attempt to take to lock.
|
||||
*/
|
||||
int drm_lock( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
drm_lock_t lock;
|
||||
int ret = 0;
|
||||
|
||||
++priv->lock_count;
|
||||
|
||||
if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( lock.context == DRM_KERNEL_CONTEXT ) {
|
||||
DRM_ERROR( "Process %d using kernel context %d\n",
|
||||
current->pid, lock.context );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
|
||||
lock.context, current->pid,
|
||||
dev->lock.hw_lock->lock, lock.flags );
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
|
||||
if ( lock.context < 0 )
|
||||
return -EINVAL;
|
||||
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
if ( !dev->lock.hw_lock ) {
|
||||
/* Device has been unregistered */
|
||||
ret = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( drm_lock_take( &dev->lock.hw_lock->lock,
|
||||
lock.context ) ) {
|
||||
dev->lock.filp = filp;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
|
||||
break; /* Got lock */
|
||||
}
|
||||
|
||||
/* Contention */
|
||||
schedule();
|
||||
if ( signal_pending( current ) ) {
|
||||
ret = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
|
||||
sigemptyset( &dev->sigmask );
|
||||
sigaddset( &dev->sigmask, SIGSTOP );
|
||||
sigaddset( &dev->sigmask, SIGTSTP );
|
||||
sigaddset( &dev->sigmask, SIGTTIN );
|
||||
sigaddset( &dev->sigmask, SIGTTOU );
|
||||
dev->sigdata.context = lock.context;
|
||||
dev->sigdata.lock = dev->lock.hw_lock;
|
||||
block_all_signals( drm_notifier,
|
||||
&dev->sigdata, &dev->sigmask );
|
||||
|
||||
if (dev->fn_tbl->dma_ready && (lock.flags & _DRM_LOCK_READY))
|
||||
dev->fn_tbl->dma_ready(dev);
|
||||
|
||||
if ( dev->fn_tbl->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT ))
|
||||
return dev->fn_tbl->dma_quiescent(dev);
|
||||
|
||||
|
||||
if ( dev->fn_tbl->kernel_context_switch && dev->last_context != lock.context ) {
|
||||
dev->fn_tbl->kernel_context_switch(dev, dev->last_context,
|
||||
lock.context);
|
||||
}
|
||||
|
||||
|
||||
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Unlock ioctl.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
* \param cmd command.
|
||||
* \param arg user argument, pointing to a drm_lock structure.
|
||||
* \return zero on success or negative number on failure.
|
||||
*
|
||||
* Transfer and free the lock.
|
||||
*/
|
||||
int drm_unlock( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_lock_t lock;
|
||||
|
||||
if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( lock.context == DRM_KERNEL_CONTEXT ) {
|
||||
DRM_ERROR( "Process %d using kernel context %d\n",
|
||||
current->pid, lock.context );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
|
||||
|
||||
if (dev->fn_tbl->kernel_context_switch_unlock)
|
||||
dev->fn_tbl->kernel_context_switch_unlock(dev);
|
||||
else
|
||||
{
|
||||
drm_lock_transfer( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
|
||||
if ( drm_lock_free( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
DRM_ERROR( "\n" );
|
||||
}
|
||||
}
|
||||
|
||||
unblock_all_signals();
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -37,6 +37,156 @@
|
|||
#include "drmP.h"
|
||||
#include <linux/poll.h>
|
||||
|
||||
static int drm_setup( drm_device_t *dev )
|
||||
{
|
||||
int i;
|
||||
|
||||
if (dev->fn_tbl->presetup)
|
||||
dev->fn_tbl->presetup(dev);
|
||||
|
||||
atomic_set( &dev->ioctl_count, 0 );
|
||||
atomic_set( &dev->vma_count, 0 );
|
||||
dev->buf_use = 0;
|
||||
atomic_set( &dev->buf_alloc, 0 );
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
{
|
||||
i = drm_dma_setup( dev );
|
||||
if ( i < 0 )
|
||||
return i;
|
||||
}
|
||||
|
||||
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
|
||||
atomic_set( &dev->counts[i], 0 );
|
||||
|
||||
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
|
||||
dev->magiclist[i].head = NULL;
|
||||
dev->magiclist[i].tail = NULL;
|
||||
}
|
||||
|
||||
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
|
||||
DRM_MEM_CTXLIST);
|
||||
if(dev->ctxlist == NULL) return -ENOMEM;
|
||||
memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
|
||||
INIT_LIST_HEAD(&dev->ctxlist->head);
|
||||
|
||||
dev->vmalist = NULL;
|
||||
dev->sigdata.lock = dev->lock.hw_lock = NULL;
|
||||
init_waitqueue_head( &dev->lock.lock_queue );
|
||||
dev->queue_count = 0;
|
||||
dev->queue_reserved = 0;
|
||||
dev->queue_slots = 0;
|
||||
dev->queuelist = NULL;
|
||||
dev->irq_enabled = 0;
|
||||
dev->context_flag = 0;
|
||||
dev->interrupt_flag = 0;
|
||||
dev->dma_flag = 0;
|
||||
dev->last_context = 0;
|
||||
dev->last_switch = 0;
|
||||
dev->last_checked = 0;
|
||||
init_waitqueue_head( &dev->context_wait );
|
||||
dev->if_version = 0;
|
||||
|
||||
dev->ctx_start = 0;
|
||||
dev->lck_start = 0;
|
||||
|
||||
dev->buf_rp = dev->buf;
|
||||
dev->buf_wp = dev->buf;
|
||||
dev->buf_end = dev->buf + DRM_BSZ;
|
||||
dev->buf_async = NULL;
|
||||
init_waitqueue_head( &dev->buf_readers );
|
||||
init_waitqueue_head( &dev->buf_writers );
|
||||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
/*
|
||||
* The kernel's context could be created here, but is now created
|
||||
* in drm_dma_enqueue. This is more resource-efficient for
|
||||
* hardware that does not do DMA, but may mean that
|
||||
* drm_select_queue fails between the time the interrupt is
|
||||
* initialized and the time the queues are initialized.
|
||||
*/
|
||||
if (dev->fn_tbl->postsetup)
|
||||
dev->fn_tbl->postsetup(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Open file.
|
||||
*
|
||||
* \param inode device inode
|
||||
* \param filp file pointer.
|
||||
* \return zero on success or a negative number on failure.
|
||||
*
|
||||
* Searches the DRM device with the same minor number, calls open_helper(), and
|
||||
* increments the device open count. If the open count was previous at zero,
|
||||
* i.e., it's the first that the device is open, then calls setup().
|
||||
*/
|
||||
int drm_open( struct inode *inode, struct file *filp )
|
||||
{
|
||||
drm_device_t *dev = NULL;
|
||||
int minor = iminor(inode);
|
||||
int retcode = 0;
|
||||
|
||||
if (!((minor >= 0) && (minor < cards_limit)))
|
||||
return -ENODEV;
|
||||
|
||||
dev = drm_minors[minor].dev;
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
retcode = drm_open_helper( inode, filp, dev );
|
||||
if ( !retcode ) {
|
||||
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( !dev->open_count++ ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return drm_setup( dev );
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
}
|
||||
|
||||
return retcode;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_open);
|
||||
|
||||
/**
|
||||
* File \c open operation.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
*
|
||||
* Puts the dev->fops corresponding to the device minor number into
|
||||
* \p filp, call the \c open method, and restore the file operations.
|
||||
*/
|
||||
int drm_stub_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
drm_device_t *dev = NULL;
|
||||
int minor = iminor(inode);
|
||||
int err = -ENODEV;
|
||||
struct file_operations *old_fops;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (!((minor >= 0) && (minor < cards_limit)))
|
||||
return -ENODEV;
|
||||
|
||||
dev = drm_minors[minor].dev;
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
old_fops = filp->f_op;
|
||||
filp->f_op = fops_get(&dev->fn_tbl->fops);
|
||||
if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
|
||||
fops_put(filp->f_op);
|
||||
filp->f_op = fops_get(old_fops);
|
||||
}
|
||||
fops_put(old_fops);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called whenever a process opens /dev/drm.
|
||||
|
@@ -128,3 +278,168 @@ int drm_fasync(int fd, struct file *filp, int on)
|
|||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fasync);
|
||||
|
||||
/**
|
||||
* Release file.
|
||||
*
|
||||
* \param inode device inode
|
||||
* \param filp file pointer.
|
||||
* \return zero on success or a negative number on failure.
|
||||
*
|
||||
* If the hardware lock is held then free it, and take it again for the kernel
|
||||
* context since it's necessary to reclaim buffers. Unlink the file private
|
||||
* data from its list and free it. Decreases the open count and if it reaches
|
||||
* zero calls takedown().
|
||||
*/
|
||||
int drm_release( struct inode *inode, struct file *filp )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev;
|
||||
int retcode = 0;
|
||||
|
||||
lock_kernel();
|
||||
dev = priv->dev;
|
||||
|
||||
DRM_DEBUG( "open_count = %d\n", dev->open_count );
|
||||
|
||||
if (dev->fn_tbl->prerelease)
|
||||
dev->fn_tbl->prerelease(dev, filp);
|
||||
|
||||
/* ========================================================
|
||||
* Begin inline drm_release
|
||||
*/
|
||||
|
||||
DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
|
||||
current->pid, (long)old_encode_dev(dev->device), dev->open_count );
|
||||
|
||||
if ( priv->lock_count && dev->lock.hw_lock &&
|
||||
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
|
||||
dev->lock.filp == filp ) {
|
||||
DRM_DEBUG( "File %p released, freeing lock for context %d\n",
|
||||
filp,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
|
||||
if (dev->fn_tbl->release)
|
||||
dev->fn_tbl->release(dev, filp);
|
||||
|
||||
drm_lock_free( dev, &dev->lock.hw_lock->lock,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
|
||||
/* FIXME: may require heavy-handed reset of
|
||||
hardware at this point, possibly
|
||||
processed via a callback to the X
|
||||
server. */
|
||||
}
|
||||
else if ( dev->fn_tbl->release && priv->lock_count && dev->lock.hw_lock ) {
|
||||
/* The lock is required to reclaim buffers */
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
if ( !dev->lock.hw_lock ) {
|
||||
/* Device has been unregistered */
|
||||
retcode = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( drm_lock_take( &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
dev->lock.filp = filp;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
|
||||
break; /* Got lock */
|
||||
}
|
||||
/* Contention */
|
||||
schedule();
|
||||
if ( signal_pending( current ) ) {
|
||||
retcode = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
if( !retcode ) {
|
||||
if (dev->fn_tbl->release)
|
||||
dev->fn_tbl->release(dev, filp);
|
||||
drm_lock_free( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
}
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
{
|
||||
dev->fn_tbl->reclaim_buffers(filp);
|
||||
}
|
||||
|
||||
drm_fasync( -1, filp, 0 );
|
||||
|
||||
down( &dev->ctxlist_sem );
|
||||
if ( !list_empty( &dev->ctxlist->head ) ) {
|
||||
drm_ctx_list_t *pos, *n;
|
||||
|
||||
list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
|
||||
if ( pos->tag == priv &&
|
||||
pos->handle != DRM_KERNEL_CONTEXT ) {
|
||||
if (dev->fn_tbl->context_dtor)
|
||||
dev->fn_tbl->context_dtor(dev, pos->handle);
|
||||
|
||||
drm_ctxbitmap_free( dev, pos->handle );
|
||||
|
||||
list_del( &pos->head );
|
||||
drm_free( pos, sizeof(*pos), DRM_MEM_CTXLIST );
|
||||
--dev->ctx_count;
|
||||
}
|
||||
}
|
||||
}
|
||||
up( &dev->ctxlist_sem );
|
||||
|
||||
down( &dev->struct_sem );
|
||||
if ( priv->remove_auth_on_close == 1 ) {
|
||||
drm_file_t *temp = dev->file_first;
|
||||
while ( temp ) {
|
||||
temp->authenticated = 0;
|
||||
temp = temp->next;
|
||||
}
|
||||
}
|
||||
if ( priv->prev ) {
|
||||
priv->prev->next = priv->next;
|
||||
} else {
|
||||
dev->file_first = priv->next;
|
||||
}
|
||||
if ( priv->next ) {
|
||||
priv->next->prev = priv->prev;
|
||||
} else {
|
||||
dev->file_last = priv->prev;
|
||||
}
|
||||
up( &dev->struct_sem );
|
||||
|
||||
if (dev->fn_tbl->free_filp_priv)
|
||||
dev->fn_tbl->free_filp_priv( dev, priv );
|
||||
drm_free( priv, sizeof(*priv), DRM_MEM_FILES );
|
||||
|
||||
/* ========================================================
|
||||
* End inline drm_release
|
||||
*/
|
||||
|
||||
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( !--dev->open_count ) {
|
||||
if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
|
||||
DRM_ERROR( "Device busy: %d %d\n",
|
||||
atomic_read( &dev->ioctl_count ),
|
||||
dev->blocked );
|
||||
spin_unlock( &dev->count_lock );
|
||||
unlock_kernel();
|
||||
return -EBUSY;
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
unlock_kernel();
|
||||
return drm_takedown( dev );
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
unlock_kernel();
|
||||
|
||||
return retcode;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_release);
|
||||
|
|
|
@@ -304,6 +304,17 @@ int drm_getstats( struct inode *inode, struct file *filp,
return 0;
}

/**
* Setversion ioctl.
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Sets the requested interface version
*/
int drm_setversion(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
@@ -345,3 +356,11 @@ int drm_setversion(DRM_IOCTL_ARGS)
}
return 0;
}

/** No-op ioctl. */
int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
DRM_DEBUG("\n");
return 0;
}
@@ -35,11 +35,143 @@
|
|||
|
||||
#include "drmP.h"
|
||||
|
||||
/** No-op ioctl. */
|
||||
int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
|
||||
/**
|
||||
* Lock ioctl.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
* \param cmd command.
|
||||
* \param arg user argument, pointing to a drm_lock structure.
|
||||
* \return zero on success or negative number on failure.
|
||||
*
|
||||
* Add the current task to the lock wait queue, and attempt to take to lock.
|
||||
*/
|
||||
int drm_lock( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
DRM_DEBUG("\n");
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
drm_lock_t lock;
|
||||
int ret = 0;
|
||||
|
||||
++priv->lock_count;
|
||||
|
||||
if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( lock.context == DRM_KERNEL_CONTEXT ) {
|
||||
DRM_ERROR( "Process %d using kernel context %d\n",
|
||||
current->pid, lock.context );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
|
||||
lock.context, current->pid,
|
||||
dev->lock.hw_lock->lock, lock.flags );
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
|
||||
if ( lock.context < 0 )
|
||||
return -EINVAL;
|
||||
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
if ( !dev->lock.hw_lock ) {
|
||||
/* Device has been unregistered */
|
||||
ret = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( drm_lock_take( &dev->lock.hw_lock->lock,
|
||||
lock.context ) ) {
|
||||
dev->lock.filp = filp;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
|
||||
break; /* Got lock */
|
||||
}
|
||||
|
||||
/* Contention */
|
||||
schedule();
|
||||
if ( signal_pending( current ) ) {
|
||||
ret = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
|
||||
sigemptyset( &dev->sigmask );
|
||||
sigaddset( &dev->sigmask, SIGSTOP );
|
||||
sigaddset( &dev->sigmask, SIGTSTP );
|
||||
sigaddset( &dev->sigmask, SIGTTIN );
|
||||
sigaddset( &dev->sigmask, SIGTTOU );
|
||||
dev->sigdata.context = lock.context;
|
||||
dev->sigdata.lock = dev->lock.hw_lock;
|
||||
block_all_signals( drm_notifier,
|
||||
&dev->sigdata, &dev->sigmask );
|
||||
|
||||
if (dev->fn_tbl->dma_ready && (lock.flags & _DRM_LOCK_READY))
|
||||
dev->fn_tbl->dma_ready(dev);
|
||||
|
||||
if ( dev->fn_tbl->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT ))
|
||||
return dev->fn_tbl->dma_quiescent(dev);
|
||||
|
||||
|
||||
if ( dev->fn_tbl->kernel_context_switch && dev->last_context != lock.context ) {
|
||||
dev->fn_tbl->kernel_context_switch(dev, dev->last_context,
|
||||
lock.context);
|
||||
}
|
||||
|
||||
|
||||
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Unlock ioctl.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
* \param cmd command.
|
||||
* \param arg user argument, pointing to a drm_lock structure.
|
||||
* \return zero on success or negative number on failure.
|
||||
*
|
||||
* Transfer and free the lock.
|
||||
*/
|
||||
int drm_unlock( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_lock_t lock;
|
||||
|
||||
if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( lock.context == DRM_KERNEL_CONTEXT ) {
|
||||
DRM_ERROR( "Process %d using kernel context %d\n",
|
||||
current->pid, lock.context );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
|
||||
|
||||
if (dev->fn_tbl->kernel_context_switch_unlock)
|
||||
dev->fn_tbl->kernel_context_switch_unlock(dev);
|
||||
else
|
||||
{
|
||||
drm_lock_transfer( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
|
||||
if ( drm_lock_free( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
DRM_ERROR( "\n" );
|
||||
}
|
||||
}
|
||||
|
||||
unblock_all_signals();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -51,49 +51,86 @@ drm_minor_t *drm_minors;
|
|||
struct drm_sysfs_class *drm_class;
|
||||
struct proc_dir_entry *drm_proc_root;
|
||||
|
||||
/**
|
||||
* File \c open operation.
|
||||
*
|
||||
* \param inode device inode.
|
||||
* \param filp file pointer.
|
||||
*
|
||||
* Puts the dev->fops corresponding to the device minor number into
|
||||
* \p filp, call the \c open method, and restore the file operations.
|
||||
*/
|
||||
static int stub_open(struct inode *inode, struct file *filp)
|
||||
static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver_fn *driver_fn)
|
||||
{
|
||||
drm_device_t *dev = NULL;
|
||||
int minor = iminor(inode);
|
||||
int err = -ENODEV;
|
||||
struct file_operations *old_fops;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
int retcode;
|
||||
|
||||
if (!((minor >= 0) && (minor < cards_limit)))
|
||||
return -ENODEV;
|
||||
dev->count_lock = SPIN_LOCK_UNLOCKED;
|
||||
init_timer( &dev->timer );
|
||||
sema_init( &dev->struct_sem, 1 );
|
||||
sema_init( &dev->ctxlist_sem, 1 );
|
||||
|
||||
dev = drm_minors[minor].dev;
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
dev->name = DRIVER_NAME;
|
||||
dev->pdev = pdev;
|
||||
|
||||
old_fops = filp->f_op;
|
||||
filp->f_op = fops_get(&dev->fn_tbl->fops);
|
||||
if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
|
||||
fops_put(filp->f_op);
|
||||
filp->f_op = fops_get(old_fops);
|
||||
#ifdef __alpha__
|
||||
dev->hose = pdev->sysdata;
|
||||
dev->pci_domain = dev->hose->bus->number;
|
||||
#else
|
||||
dev->pci_domain = 0;
|
||||
#endif
|
||||
dev->pci_bus = pdev->bus->number;
|
||||
dev->pci_slot = PCI_SLOT(pdev->devfn);
|
||||
dev->pci_func = PCI_FUNC(pdev->devfn);
|
||||
dev->irq = pdev->irq;
|
||||
|
||||
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
|
||||
if(dev->maplist == NULL) return -ENOMEM;
|
||||
INIT_LIST_HEAD(&dev->maplist->head);
|
||||
|
||||
/* the DRM has 6 counters */
|
||||
dev->counters = 6;
|
||||
dev->types[0] = _DRM_STAT_LOCK;
|
||||
dev->types[1] = _DRM_STAT_OPENS;
|
||||
dev->types[2] = _DRM_STAT_CLOSES;
|
||||
dev->types[3] = _DRM_STAT_IOCTLS;
|
||||
dev->types[4] = _DRM_STAT_LOCKS;
|
||||
dev->types[5] = _DRM_STAT_UNLOCKS;
|
||||
|
||||
dev->fn_tbl = driver_fn;
|
||||
|
||||
if (dev->fn_tbl->preinit)
|
||||
if ((retcode = dev->fn_tbl->preinit(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
|
||||
if (drm_core_has_AGP(dev)) {
|
||||
dev->agp = drm_agp_init();
|
||||
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
|
||||
DRM_ERROR( "Cannot initialize the agpgart module.\n" );
|
||||
retcode = -EINVAL;
|
||||
goto error_out_unreg;
|
||||
}
|
||||
|
||||
|
||||
if (drm_core_has_MTRR(dev)) {
|
||||
if (dev->agp)
|
||||
dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
|
||||
dev->agp->agp_info.aper_size*1024*1024,
|
||||
MTRR_TYPE_WRCOMB,
|
||||
1 );
|
||||
}
|
||||
}
|
||||
fops_put(old_fops);
|
||||
|
||||
return err;
|
||||
retcode = drm_ctxbitmap_init( dev );
|
||||
if( retcode ) {
|
||||
DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
|
||||
goto error_out_unreg;
|
||||
}
|
||||
|
||||
dev->device = MKDEV(DRM_MAJOR, dev->minor );
|
||||
|
||||
/* postinit is a required function to display the signon banner */
|
||||
/* drivers add secondary heads here if needed */
|
||||
if ((retcode = dev->fn_tbl->postinit(dev, ent->driver_data)))
|
||||
goto error_out_unreg;
|
||||
|
||||
return 0;
|
||||
|
||||
error_out_unreg:
|
||||
drm_takedown(dev);
|
||||
return retcode;
|
||||
}
|
||||
|
||||
/** File operations structure */
|
||||
struct file_operations drm_stub_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = stub_open
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Register.
|
||||
*
|
||||
|
@@ -125,7 +162,7 @@ int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_

*minors = (drm_minor_t){.dev = dev, .class = DRM_MINOR_PRIMARY};
dev->minor = minor;
if ((ret = drm_fill_in_dev(dev, pdev, ent, driver_fn))) {
if ((ret = fill_in_dev(dev, pdev, ent, driver_fn))) {
printk (KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g1;
}
@@ -543,7 +543,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 );
dev_priv->ring.size_l2qw = get_order( init->ring_size / 8 );

dev_priv->ring.tail_mask =
(dev_priv->ring.size / sizeof(u32)) - 1;
@@ -1215,7 +1215,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv->ring.end = ((u32 *)dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 );
dev_priv->ring.size_l2qw = get_order( init->ring_size / 8 );

dev_priv->ring.tail_mask =
(dev_priv->ring.size / sizeof(u32)) - 1;