Lindent of core build. Drivers checked for no binary diffs. A few files weren't Lindent'ed because their comments didn't convert very well. A bunch of other minor cleanup with no code impact included.
main
Jon Smirl 2004-09-30 21:12:10 +00:00
parent 368493edc9
commit 9f9a8f1382
98 changed files with 19795 additions and 19980 deletions
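The message describes a two-step workflow: run Lindent across the core build, then rebuild and confirm the reformat changed no generated object code. A minimal sketch of that check is below; it is not part of the commit, and it assumes a kernel-style tree with scripts/Lindent available and a build done without debug info, so that whitespace-only edits leave the object files byte-identical.

# Sketch: run Lindent over a tree, rebuild, and report any binary diffs.
import hashlib
import subprocess
import sys
from pathlib import Path

def object_digests(tree):
    """Return an MD5 digest for every built .o file under the tree."""
    return {str(p): hashlib.md5(p.read_bytes()).hexdigest()
            for p in Path(tree).rglob("*.o")}

def main(tree="."):
    subprocess.run(["make"], cwd=tree, check=True)   # baseline build
    before = object_digests(tree)

    # scripts/Lindent wraps GNU indent with the kernel options and
    # reformats the named files in place.
    sources = [str(p) for p in Path(tree).rglob("*.[ch]")]
    subprocess.run(["scripts/Lindent"] + sources, cwd=tree, check=True)

    subprocess.run(["make"], cwd=tree, check=True)   # rebuild after Lindent
    after = object_digests(tree)

    changed = sorted(p for p in before if after.get(p) != before[p])
    print("binary diffs:", ", ".join(changed) if changed else "none")
    return 1 if changed else 0

if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))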


@ -52,48 +52,47 @@
# define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
static unsigned long drm_ati_alloc_pcigart_table( void )
static unsigned long drm_ati_alloc_pcigart_table(void)
{
unsigned long address;
struct page *page;
int i;
DRM_DEBUG( "%s\n", __FUNCTION__ );
DRM_DEBUG("%s\n", __FUNCTION__);
address = __get_free_pages( GFP_KERNEL, ATI_PCIGART_TABLE_ORDER );
if ( address == 0UL ) {
address = __get_free_pages(GFP_KERNEL, ATI_PCIGART_TABLE_ORDER);
if (address == 0UL) {
return 0;
}
page = virt_to_page( address );
page = virt_to_page(address);
for ( i = 0 ; i < ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
get_page(page);
SetPageReserved( page );
SetPageReserved(page);
}
DRM_DEBUG( "%s: returning 0x%08lx\n", __FUNCTION__, address );
DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
return address;
}
static void drm_ati_free_pcigart_table( unsigned long address )
static void drm_ati_free_pcigart_table(unsigned long address)
{
struct page *page;
int i;
DRM_DEBUG( "%s\n", __FUNCTION__ );
DRM_DEBUG("%s\n", __FUNCTION__);
page = virt_to_page( address );
page = virt_to_page(address);
for ( i = 0 ; i < ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
__put_page(page);
ClearPageReserved( page );
ClearPageReserved(page);
}
free_pages( address, ATI_PCIGART_TABLE_ORDER );
free_pages(address, ATI_PCIGART_TABLE_ORDER);
}
int drm_ati_pcigart_init( drm_device_t *dev,
unsigned long *addr,
dma_addr_t *bus_addr)
int drm_ati_pcigart_init(drm_device_t * dev,
unsigned long *addr, dma_addr_t * bus_addr)
{
drm_sg_mem_t *entry = dev->sg;
unsigned long address = 0;
@ -101,48 +100,48 @@ int drm_ati_pcigart_init( drm_device_t *dev,
u32 *pci_gart, page_base, bus_address = 0;
int i, j, ret = 0;
if ( !entry ) {
DRM_ERROR( "no scatter/gather memory!\n" );
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
goto done;
}
address = drm_ati_alloc_pcigart_table();
if ( !address ) {
DRM_ERROR( "cannot allocate PCI GART page!\n" );
if (!address) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
}
if ( !dev->pdev ) {
DRM_ERROR( "PCI device unknown!\n" );
if (!dev->pdev) {
DRM_ERROR("PCI device unknown!\n");
goto done;
}
bus_address = pci_map_single(dev->pdev, (void *)address,
ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
PCI_DMA_TODEVICE);
ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
PCI_DMA_TODEVICE);
if (bus_address == 0) {
DRM_ERROR( "unable to map PCIGART pages!\n" );
drm_ati_free_pcigart_table( address );
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_free_pcigart_table(address);
address = 0;
goto done;
}
pci_gart = (u32 *)address;
pci_gart = (u32 *) address;
pages = ( entry->pages <= ATI_MAX_PCIGART_PAGES )
? entry->pages : ATI_MAX_PCIGART_PAGES;
pages = (entry->pages <= ATI_MAX_PCIGART_PAGES)
? entry->pages : ATI_MAX_PCIGART_PAGES;
memset( pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32) );
memset(pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32));
for ( i = 0 ; i < pages ; i++ ) {
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_single(dev->pdev,
page_address( entry->pagelist[i] ),
PAGE_SIZE,
PCI_DMA_TODEVICE);
page_address(entry->
pagelist[i]),
PAGE_SIZE, PCI_DMA_TODEVICE);
if (entry->busaddr[i] == 0) {
DRM_ERROR( "unable to map PCIGART pages!\n" );
drm_ati_pcigart_cleanup( dev, address, bus_address );
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, address, bus_address);
address = 0;
bus_address = 0;
goto done;
@ -150,7 +149,7 @@ int drm_ati_pcigart_init( drm_device_t *dev,
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
*pci_gart++ = cpu_to_le32( page_base );
*pci_gart++ = cpu_to_le32(page_base);
page_base += ATI_PCIGART_PAGE_SIZE;
}
}
@ -158,48 +157,48 @@ int drm_ati_pcigart_init( drm_device_t *dev,
ret = 1;
#if defined(__i386__) || defined(__x86_64__)
asm volatile ( "wbinvd" ::: "memory" );
asm volatile ("wbinvd":::"memory");
#else
mb();
#endif
done:
done:
*addr = address;
*bus_addr = bus_address;
return ret;
}
int drm_ati_pcigart_cleanup( drm_device_t *dev,
unsigned long addr,
dma_addr_t bus_addr)
int drm_ati_pcigart_cleanup(drm_device_t * dev,
unsigned long addr, dma_addr_t bus_addr)
{
drm_sg_mem_t *entry = dev->sg;
unsigned long pages;
int i;
/* we need to support large memory configurations */
if ( !entry ) {
DRM_ERROR( "no scatter/gather memory!\n" );
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
return 0;
}
if ( bus_addr ) {
if (bus_addr) {
pci_unmap_single(dev->pdev, bus_addr,
ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
PCI_DMA_TODEVICE);
pages = ( entry->pages <= ATI_MAX_PCIGART_PAGES )
? entry->pages : ATI_MAX_PCIGART_PAGES;
pages = (entry->pages <= ATI_MAX_PCIGART_PAGES)
? entry->pages : ATI_MAX_PCIGART_PAGES;
for ( i = 0 ; i < pages ; i++ ) {
if ( !entry->busaddr[i] ) break;
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
pci_unmap_single(dev->pdev, entry->busaddr[i],
PAGE_SIZE, PCI_DMA_TODEVICE);
}
}
if ( addr ) {
drm_ati_free_pcigart_table( addr );
if (addr) {
drm_ati_free_pcigart_table(addr);
}
return 1;

File diff suppressed because it is too large.


@ -54,28 +54,28 @@ const drm_agp_t *drm_agp = NULL;
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
int drm_agp_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DRM_AGP_KERN *kern;
drm_agp_info_t info;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DRM_AGP_KERN *kern;
drm_agp_info_t info;
if (!dev->agp || !dev->agp->acquired || !drm_agp->copy_info)
return -EINVAL;
kern = &dev->agp->agp_info;
kern = &dev->agp->agp_info;
info.agp_version_major = kern->version.major;
info.agp_version_minor = kern->version.minor;
info.mode = kern->mode;
info.aperture_base = kern->aper_base;
info.aperture_size = kern->aper_size * 1024 * 1024;
info.memory_allowed = kern->max_memory << PAGE_SHIFT;
info.memory_used = kern->current_memory << PAGE_SHIFT;
info.id_vendor = kern->device->vendor;
info.id_device = kern->device->device;
info.mode = kern->mode;
info.aperture_base = kern->aper_base;
info.aperture_size = kern->aper_size * 1024 * 1024;
info.memory_allowed = kern->max_memory << PAGE_SHIFT;
info.memory_used = kern->current_memory << PAGE_SHIFT;
info.id_vendor = kern->device->vendor;
info.id_device = kern->device->device;
if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
@ -93,11 +93,11 @@ int drm_agp_info(struct inode *inode, struct file *filp,
* drm_agp->acquire().
*/
int drm_agp_acquire(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode;
if (!dev->agp)
return -ENODEV;
@ -106,7 +106,7 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
if (!drm_agp->acquire)
return -EINVAL;
#ifndef VMAP_4_ARGS
if ( dev->agp->cant_use_aperture )
if (dev->agp->cant_use_aperture)
return -EINVAL;
#endif
if ((retcode = drm_agp->acquire()))
@ -127,10 +127,10 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
* Verifies the AGP device has been acquired and calls drm_agp->release().
*/
int drm_agp_release(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
if (!dev->agp || !dev->agp->acquired || !drm_agp->release)
return -EINVAL;
@ -164,21 +164,21 @@ void drm_agp_do_release(void)
* drm_agp->enable().
*/
int drm_agp_enable(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_agp_mode_t mode;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_agp_mode_t mode;
if (!dev->agp || !dev->agp->acquired || !drm_agp->enable)
return -EINVAL;
if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
return -EFAULT;
dev->agp->mode = mode.mode;
dev->agp->mode = mode.mode;
drm_agp->enable(mode.mode);
dev->agp->base = dev->agp->agp_info.aper_base;
dev->agp->base = dev->agp->agp_info.aper_base;
dev->agp->enabled = 1;
return 0;
}
@ -196,15 +196,15 @@ int drm_agp_enable(struct inode *inode, struct file *filp,
* memory via alloc_agp() and creates a drm_agp_mem entry for it.
*/
int drm_agp_alloc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_agp_buffer_t request;
drm_agp_mem_t *entry;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
drm_agp_mem_t *entry;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
drm_agp_buffer_t __user *argp = (void __user *)arg;
if (!dev->agp || !dev->agp->acquired)
@ -214,7 +214,7 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
memset(entry, 0, sizeof(*entry));
pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request.type;
@ -224,21 +224,21 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
return -ENOMEM;
}
entry->handle = (unsigned long)memory->key + 1;
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
entry->prev = NULL;
entry->next = dev->agp->memory;
entry->handle = (unsigned long)memory->key + 1;
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
entry->prev = NULL;
entry->next = dev->agp->memory;
if (dev->agp->memory)
dev->agp->memory->prev = entry;
dev->agp->memory = entry;
request.handle = entry->handle;
request.handle = entry->handle;
request.physical = memory->physical;
if (copy_to_user(argp, &request, sizeof(request))) {
dev->agp->memory = entry->next;
dev->agp->memory = entry->next;
dev->agp->memory->prev = NULL;
drm_free_agp(memory, pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
@ -256,8 +256,8 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t *dev,
unsigned long handle)
static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
unsigned long handle)
{
drm_agp_mem_t *entry;
@ -281,17 +281,18 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t *dev,
* entry and passes it to the unbind_agp() function.
*/
int drm_agp_unbind(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_agp_binding_t request;
drm_agp_mem_t *entry;
drm_agp_mem_t *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
if (copy_from_user
(&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
@ -299,7 +300,7 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
return -EINVAL;
ret = drm_unbind_agp(entry->memory);
if (ret == 0)
entry->bound = 0;
entry->bound = 0;
return ret;
}
@ -317,18 +318,19 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
* it to bind_agp() function.
*/
int drm_agp_bind(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_agp_binding_t request;
drm_agp_mem_t *entry;
int retcode;
int page;
drm_agp_mem_t *entry;
int retcode;
int page;
if (!dev->agp || !dev->agp->acquired || !drm_agp->bind_memory)
return -EINVAL;
if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
if (copy_from_user
(&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
@ -358,16 +360,17 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
* and unlinks from the doubly linked list it's inserted in.
*/
int drm_agp_free(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_agp_buffer_t request;
drm_agp_mem_t *entry;
drm_agp_mem_t *entry;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
if (copy_from_user(&request, (drm_agp_buffer_t __user *)arg, sizeof(request)))
if (copy_from_user
(&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
@ -430,7 +433,7 @@ DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type)
}
/** Calls drm_agp->free_memory() */
int drm_agp_free_memory(DRM_AGP_MEM *handle)
int drm_agp_free_memory(DRM_AGP_MEM * handle)
{
if (!handle || !drm_agp->free_memory)
return 0;
@ -439,7 +442,7 @@ int drm_agp_free_memory(DRM_AGP_MEM *handle)
}
/** Calls drm_agp->bind_memory() */
int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start)
int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
{
if (!handle || !drm_agp->bind_memory)
return -EINVAL;
@ -447,11 +450,11 @@ int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start)
}
/** Calls drm_agp->unbind_memory() */
int drm_agp_unbind_memory(DRM_AGP_MEM *handle)
int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
{
if (!handle || !drm_agp->unbind_memory)
return -EINVAL;
return drm_agp->unbind_memory(handle);
}
#endif /* __OS_HAS_AGP */
#endif /* __OS_HAS_AGP */


@ -46,7 +46,7 @@
*/
static int drm_hash_magic(drm_magic_t magic)
{
return magic & (DRM_HASH_SIZE-1);
return magic & (DRM_HASH_SIZE - 1);
}
/**
@ -59,11 +59,11 @@ static int drm_hash_magic(drm_magic_t magic)
* the one with matching magic number, while holding the drm_device::struct_sem
* lock.
*/
static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
{
drm_file_t *retval = NULL;
drm_file_t *retval = NULL;
drm_magic_entry_t *pt;
int hash = drm_hash_magic(magic);
int hash = drm_hash_magic(magic);
down(&dev->struct_sem);
for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
@ -87,28 +87,29 @@ static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
* associated the magic number hash key in drm_device::magiclist, while holding
* the drm_device::struct_sem lock.
*/
int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
int drm_add_magic(drm_device_t * dev, drm_file_t * priv, drm_magic_t magic)
{
int hash;
int hash;
drm_magic_entry_t *entry;
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
if (!entry) return -ENOMEM;
hash = drm_hash_magic(magic);
entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
if (!entry)
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
entry->priv = priv;
entry->next = NULL;
down(&dev->struct_sem);
if (dev->magiclist[hash].tail) {
dev->magiclist[hash].tail->next = entry;
dev->magiclist[hash].tail = entry;
dev->magiclist[hash].tail = entry;
} else {
dev->magiclist[hash].head = entry;
dev->magiclist[hash].tail = entry;
dev->magiclist[hash].head = entry;
dev->magiclist[hash].tail = entry;
}
up(&dev->struct_sem);
@ -124,12 +125,11 @@ int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_sem lock.
*/
int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
{
drm_magic_entry_t *prev = NULL;
drm_magic_entry_t *pt;
int hash;
int hash;
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
@ -171,21 +171,22 @@ int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
* filp.
*/
int drm_getmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
static drm_magic_t sequence = 0;
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_auth_t auth;
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_auth_t auth;
/* Find unique magic */
/* Find unique magic */
if (priv->magic) {
auth.magic = priv->magic;
} else {
do {
spin_lock(&lock);
if (!sequence) ++sequence; /* reserve 0 */
if (!sequence)
++sequence; /* reserve 0 */
auth.magic = sequence++;
spin_unlock(&lock);
} while (drm_find_file(dev, auth.magic));
@ -194,7 +195,7 @@ int drm_getmagic(struct inode *inode, struct file *filp,
}
DRM_DEBUG("%u\n", auth.magic);
if (copy_to_user((drm_auth_t __user *)arg, &auth, sizeof(auth)))
if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth)))
return -EFAULT;
return 0;
}
@ -211,14 +212,14 @@ int drm_getmagic(struct inode *inode, struct file *filp,
* Checks if \p filp is associated with the magic number passed in \arg.
*/
int drm_authmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_auth_t auth;
drm_file_t *file;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_auth_t auth;
drm_file_t *file;
if (copy_from_user(&auth, (drm_auth_t __user *)arg, sizeof(auth)))
if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth)))
return -EFAULT;
DRM_DEBUG("%u\n", auth.magic);
if ((file = drm_find_file(dev, auth.magic))) {

File diff suppressed because it is too large.


@ -80,9 +80,9 @@
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
static inline struct page * vmalloc_to_page(void * vmalloc_addr)
static inline struct page *vmalloc_to_page(void *vmalloc_addr)
{
unsigned long addr = (unsigned long) vmalloc_addr;
unsigned long addr = (unsigned long)vmalloc_addr;
struct page *page = NULL;
pgd_t *pgd = pgd_offset_k(addr);
pmd_t *pmd;
@ -118,7 +118,7 @@ static inline struct page * vmalloc_to_page(void * vmalloc_addr)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
static inline unsigned iminor(struct inode *inode)
{
return MINOR(inode->i_rdev);
return MINOR(inode->i_rdev);
}
#define old_encode_dev(x) (x)
@ -130,21 +130,38 @@ struct device;
#define pci_dev_put(x) do {} while (0)
#define pci_get_subsys pci_find_subsys
static inline struct class_device *DRM(sysfs_device_add)(struct drm_sysfs_class *cs, dev_t dev, struct device *device, const char *fmt, ...){return NULL;}
static inline struct class_device *DRM(sysfs_device_add) (struct drm_sysfs_class
* cs, dev_t dev,
struct device *
device,
const char *fmt,
...) {
return NULL;
}
static inline void DRM(sysfs_device_remove)(dev_t dev){}
static inline void DRM(sysfs_device_remove) (dev_t dev) {
}
static inline void DRM(sysfs_destroy)(struct drm_sysfs_class *cs){}
static inline void DRM(sysfs_destroy) (struct drm_sysfs_class * cs) {
}
static inline struct drm_sysfs_class *DRM(sysfs_create)(struct module *owner, char *name) { return NULL; }
static inline struct drm_sysfs_class *DRM(sysfs_create) (struct module * owner,
char *name) {
return NULL;
}
#ifndef pci_pretty_name
#define pci_pretty_name(x) x->name
#endif
struct drm_device;
static inline int radeon_create_i2c_busses(struct drm_device *dev){return 0;};
static inline void radeon_delete_i2c_busses(struct drm_device *dev){};
static inline int radeon_create_i2c_busses(struct drm_device *dev)
{
return 0;
};
static inline void radeon_delete_i2c_busses(struct drm_device *dev)
{
};
#endif


@ -56,22 +56,23 @@
* in drm_device::context_sareas, while holding the drm_device::struct_sem
* lock.
*/
void drm_ctxbitmap_free( drm_device_t *dev, int ctx_handle )
void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
{
if ( ctx_handle < 0 ) goto failed;
if ( !dev->ctx_bitmap ) goto failed;
if (ctx_handle < 0)
goto failed;
if (!dev->ctx_bitmap)
goto failed;
if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
if (ctx_handle < DRM_MAX_CTXBITMAP) {
down(&dev->struct_sem);
clear_bit( ctx_handle, dev->ctx_bitmap );
clear_bit(ctx_handle, dev->ctx_bitmap);
dev->context_sareas[ctx_handle] = NULL;
up(&dev->struct_sem);
return;
}
failed:
DRM_ERROR( "Attempt to free invalid context handle: %d\n",
ctx_handle );
return;
failed:
DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
return;
}
/**
@ -84,29 +85,33 @@ failed:
* drm_device::context_sareas to accommodate the new entry while holding the
* drm_device::struct_sem lock.
*/
int drm_ctxbitmap_next( drm_device_t *dev )
int drm_ctxbitmap_next(drm_device_t * dev)
{
int bit;
if(!dev->ctx_bitmap) return -1;
if (!dev->ctx_bitmap)
return -1;
down(&dev->struct_sem);
bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
if ( bit < DRM_MAX_CTXBITMAP ) {
set_bit( bit, dev->ctx_bitmap );
DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
if((bit+1) > dev->max_context) {
dev->max_context = (bit+1);
if(dev->context_sareas) {
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit < DRM_MAX_CTXBITMAP) {
set_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
if ((bit + 1) > dev->max_context) {
dev->max_context = (bit + 1);
if (dev->context_sareas) {
drm_map_t **ctx_sareas;
ctx_sareas = drm_realloc(dev->context_sareas,
(dev->max_context - 1) *
sizeof(*dev->context_sareas),
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if(!ctx_sareas) {
(dev->max_context -
1) *
sizeof(*dev->
context_sareas),
dev->max_context *
sizeof(*dev->
context_sareas),
DRM_MEM_MAPS);
if (!ctx_sareas) {
clear_bit(bit, dev->ctx_bitmap);
up(&dev->struct_sem);
return -1;
@ -115,11 +120,11 @@ int drm_ctxbitmap_next( drm_device_t *dev )
dev->context_sareas[bit] = NULL;
} else {
/* max_context == 1 at this point */
dev->context_sareas = drm_alloc(
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if(!dev->context_sareas) {
dev->context_sareas =
drm_alloc(dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if (!dev->context_sareas) {
clear_bit(bit, dev->ctx_bitmap);
up(&dev->struct_sem);
return -1;
@ -142,26 +147,26 @@ int drm_ctxbitmap_next( drm_device_t *dev )
* Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
* the drm_device::struct_sem lock.
*/
int drm_ctxbitmap_init( drm_device_t *dev )
int drm_ctxbitmap_init(drm_device_t * dev)
{
int i;
int temp;
int temp;
down(&dev->struct_sem);
dev->ctx_bitmap = (unsigned long *) drm_alloc( PAGE_SIZE,
DRM_MEM_CTXBITMAP );
if ( dev->ctx_bitmap == NULL ) {
dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
DRM_MEM_CTXBITMAP);
if (dev->ctx_bitmap == NULL) {
up(&dev->struct_sem);
return -ENOMEM;
}
memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
dev->context_sareas = NULL;
dev->max_context = -1;
up(&dev->struct_sem);
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
temp = drm_ctxbitmap_next( dev );
DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
}
return 0;
@ -175,14 +180,14 @@ int drm_ctxbitmap_init( drm_device_t *dev )
* Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
* the drm_device::struct_sem lock.
*/
void drm_ctxbitmap_cleanup( drm_device_t *dev )
void drm_ctxbitmap_cleanup(drm_device_t * dev)
{
down(&dev->struct_sem);
if( dev->context_sareas ) drm_free( dev->context_sareas,
sizeof(*dev->context_sareas) *
dev->max_context,
DRM_MEM_MAPS );
drm_free( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
if (dev->context_sareas)
drm_free(dev->context_sareas,
sizeof(*dev->context_sareas) *
dev->max_context, DRM_MEM_MAPS);
drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
up(&dev->struct_sem);
}
@ -205,10 +210,10 @@ void drm_ctxbitmap_cleanup( drm_device_t *dev )
* returns its handle.
*/
int drm_getsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
@ -217,7 +222,8 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
return -EFAULT;
down(&dev->struct_sem);
if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) {
if (dev->max_context < 0
|| request.ctx_id >= (unsigned)dev->max_context) {
up(&dev->struct_sem);
return -EINVAL;
}
@ -244,37 +250,36 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
* drm_device::context_sareas with it.
*/
int drm_setsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list = NULL;
struct list_head *list;
if (copy_from_user(&request,
(drm_ctx_priv_map_t __user *)arg,
sizeof(request)))
(drm_ctx_priv_map_t __user *) arg, sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
if(r_list->map &&
r_list->map->handle == request.handle)
if (r_list->map && r_list->map->handle == request.handle)
goto found;
}
bad:
bad:
up(&dev->struct_sem);
return -EINVAL;
found:
found:
map = r_list->map;
if (!map) goto bad;
if (!map)
goto bad;
if (dev->max_context < 0)
goto bad;
if (request.ctx_id >= (unsigned) dev->max_context)
if (request.ctx_id >= (unsigned)dev->max_context)
goto bad;
dev->context_sareas[request.ctx_id] = map;
up(&dev->struct_sem);
@ -297,22 +302,21 @@ found:
*
* Attempt to set drm_device::context_flag.
*/
int drm_context_switch( drm_device_t *dev, int old, int new )
int drm_context_switch(drm_device_t * dev, int old, int new)
{
if ( test_and_set_bit( 0, &dev->context_flag ) ) {
DRM_ERROR( "Reentering -- FIXME\n" );
return -EBUSY;
}
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
DRM_DEBUG("Context switch from %d to %d\n", old, new);
DRM_DEBUG( "Context switch from %d to %d\n", old, new );
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
if ( new == dev->last_context ) {
clear_bit( 0, &dev->context_flag );
return 0;
}
return 0;
return 0;
}
/**
@ -326,22 +330,22 @@ int drm_context_switch( drm_device_t *dev, int old, int new )
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
int drm_context_switch_complete( drm_device_t *dev, int new )
int drm_context_switch_complete(drm_device_t * dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
DRM_ERROR( "Lock isn't held after context switch\n" );
}
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
clear_bit( 0, &dev->context_flag );
wake_up( &dev->context_wait );
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
clear_bit(0, &dev->context_flag);
wake_up(&dev->context_wait);
return 0;
return 0;
}
/**
@ -353,29 +357,28 @@ int drm_context_switch_complete( drm_device_t *dev, int new )
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
int drm_resctx( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_resctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_ctx_res_t res;
drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
int i;
if ( copy_from_user( &res, argp, sizeof(res) ) )
if (copy_from_user(&res, argp, sizeof(res)))
return -EFAULT;
if ( res.count >= DRM_RESERVED_CONTEXTS ) {
memset( &ctx, 0, sizeof(ctx) );
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if ( copy_to_user( &res.contexts[i],
&i, sizeof(i) ) )
if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
if ( copy_to_user( argp, &res, sizeof(res) ) )
if (copy_to_user(argp, &res, sizeof(res)))
return -EFAULT;
return 0;
}
@ -391,58 +394,57 @@ int drm_resctx( struct inode *inode, struct file *filp,
*
* Get a new handle for the context and copy to userspace.
*/
int drm_addctx( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_addctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_list_t * ctx_entry;
drm_ctx_list_t *ctx_entry;
drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
if ( copy_from_user( &ctx, argp, sizeof(ctx) ) )
if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
ctx.handle = drm_ctxbitmap_next( dev );
if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
/* Skip kernel's context and get a new one. */
ctx.handle = drm_ctxbitmap_next( dev );
ctx.handle = drm_ctxbitmap_next(dev);
if (ctx.handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = drm_ctxbitmap_next(dev);
}
DRM_DEBUG( "%d\n", ctx.handle );
if ( ctx.handle == -1 ) {
DRM_DEBUG( "Not enough free contexts.\n" );
/* Should this return -EBUSY instead? */
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
}
if ( ctx.handle != DRM_KERNEL_CONTEXT )
{
if (ctx.handle != DRM_KERNEL_CONTEXT) {
if (dev->fn_tbl->context_ctor)
dev->fn_tbl->context_ctor(dev, ctx.handle);
}
ctx_entry = drm_alloc( sizeof(*ctx_entry), DRM_MEM_CTXLIST );
if ( !ctx_entry ) {
ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
INIT_LIST_HEAD( &ctx_entry->head );
INIT_LIST_HEAD(&ctx_entry->head);
ctx_entry->handle = ctx.handle;
ctx_entry->tag = priv;
down( &dev->ctxlist_sem );
list_add( &ctx_entry->head, &dev->ctxlist->head );
down(&dev->ctxlist_sem);
list_add(&ctx_entry->head, &dev->ctxlist->head);
++dev->ctx_count;
up( &dev->ctxlist_sem );
up(&dev->ctxlist_sem);
if ( copy_to_user( argp, &ctx, sizeof(ctx) ) )
if (copy_to_user(argp, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int drm_modctx( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_modctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
/* This does nothing */
return 0;
@ -457,19 +459,19 @@ int drm_modctx( struct inode *inode, struct file *filp,
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
int drm_getctx( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_getctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
if ( copy_from_user( &ctx, argp, sizeof(ctx) ) )
if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
if ( copy_to_user( argp, &ctx, sizeof(ctx) ) )
if (copy_to_user(argp, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
@ -485,18 +487,18 @@ int drm_getctx( struct inode *inode, struct file *filp,
*
* Calls context_switch().
*/
int drm_switchctx( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_switchctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
return drm_context_switch( dev, dev->last_context, ctx.handle );
DRM_DEBUG("%d\n", ctx.handle);
return drm_context_switch(dev, dev->last_context, ctx.handle);
}
/**
@ -510,18 +512,18 @@ int drm_switchctx( struct inode *inode, struct file *filp,
*
* Calls context_switch_complete().
*/
int drm_newctx( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_newctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
drm_context_switch_complete( dev, ctx.handle );
DRM_DEBUG("%d\n", ctx.handle);
drm_context_switch_complete(dev, ctx.handle);
return 0;
}
@ -537,42 +539,41 @@ int drm_newctx( struct inode *inode, struct file *filp,
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
int drm_rmctx( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_rmctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
if ( ctx.handle == DRM_KERNEL_CONTEXT + 1 ) {
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle == DRM_KERNEL_CONTEXT + 1) {
priv->remove_auth_on_close = 1;
}
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
if (ctx.handle != DRM_KERNEL_CONTEXT) {
if (dev->fn_tbl->context_dtor)
dev->fn_tbl->context_dtor(dev, ctx.handle);
drm_ctxbitmap_free( dev, ctx.handle );
drm_ctxbitmap_free(dev, ctx.handle);
}
down( &dev->ctxlist_sem );
if ( !list_empty( &dev->ctxlist->head ) ) {
down(&dev->ctxlist_sem);
if (!list_empty(&dev->ctxlist->head)) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
if ( pos->handle == ctx.handle ) {
list_del( &pos->head );
drm_free( pos, sizeof(*pos), DRM_MEM_CTXLIST );
list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
if (pos->handle == ctx.handle) {
list_del(&pos->head);
drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
--dev->ctx_count;
}
}
}
up( &dev->ctxlist_sem );
up(&dev->ctxlist_sem);
return 0;
}
/*@}*/


@ -35,4 +35,3 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0


@ -43,17 +43,17 @@
*
* Allocate and initialize a drm_device_dma structure.
*/
int drm_dma_setup( drm_device_t *dev )
int drm_dma_setup(drm_device_t * dev)
{
int i;
dev->dma = drm_alloc( sizeof(*dev->dma), DRM_MEM_DRIVER );
if ( !dev->dma )
dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
if (!dev->dma)
return -ENOMEM;
memset( dev->dma, 0, sizeof(*dev->dma) );
memset(dev->dma, 0, sizeof(*dev->dma));
for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
return 0;
@ -67,14 +67,15 @@ int drm_dma_setup( drm_device_t *dev )
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the the drm_device::dma structure itself.
*/
void drm_dma_takedown(drm_device_t *dev)
void drm_dma_takedown(drm_device_t * dev)
{
drm_device_dma_t *dma = dev->dma;
int i, j;
drm_device_dma_t *dma = dev->dma;
int i, j;
if (!dma) return;
if (!dma)
return;
/* Clear dma buffers */
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
@ -85,46 +86,43 @@ void drm_dma_takedown(drm_device_t *dev)
for (j = 0; j < dma->bufs[i].seg_count; j++) {
if (dma->bufs[i].seglist[j]) {
drm_free_pages(dma->bufs[i].seglist[j],
dma->bufs[i].page_order,
DRM_MEM_DMA);
dma->bufs[i].page_order,
DRM_MEM_DMA);
}
}
drm_free(dma->bufs[i].seglist,
dma->bufs[i].seg_count
* sizeof(*dma->bufs[0].seglist),
DRM_MEM_SEGS);
dma->bufs[i].seg_count
* sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
}
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
if (dma->bufs[i].buflist[j].dev_private) {
drm_free(dma->bufs[i].buflist[j].dev_private,
dma->bufs[i].buflist[j].dev_priv_size,
DRM_MEM_BUFS);
drm_free(dma->bufs[i].buflist[j].
dev_private,
dma->bufs[i].buflist[j].
dev_priv_size, DRM_MEM_BUFS);
}
}
drm_free(dma->bufs[i].buflist,
dma->bufs[i].buf_count *
sizeof(*dma->bufs[0].buflist),
DRM_MEM_BUFS);
drm_free(dma->bufs[i].buflist,
dma->bufs[i].buf_count *
sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
}
}
if (dma->buflist) {
drm_free(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
DRM_MEM_BUFS);
dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
}
if (dma->pagelist) {
drm_free(dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
DRM_MEM_PAGES);
dma->page_count * sizeof(*dma->pagelist),
DRM_MEM_PAGES);
}
drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
dev->dma = NULL;
}
/**
* Free a buffer.
*
@ -133,16 +131,18 @@ void drm_dma_takedown(drm_device_t *dev)
*
* Resets the fields of \p buf.
*/
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf)
{
if (!buf) return;
if (!buf)
return;
buf->waiting = 0;
buf->pending = 0;
buf->filp = NULL;
buf->used = 0;
buf->waiting = 0;
buf->pending = 0;
buf->filp = NULL;
buf->used = 0;
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && waitqueue_active(&buf->dma_wait)) {
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
&& waitqueue_active(&buf->dma_wait)) {
wake_up_interruptible(&buf->dma_wait);
}
}
@ -154,14 +154,15 @@ void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
*
* Frees each buffer associated with \p filp not already on the hardware.
*/
void drm_core_reclaim_buffers( struct file *filp )
void drm_core_reclaim_buffers(struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int i;
int i;
if (!dma) return;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->filp == filp) {
switch (dma->buflist[i]->list) {
@ -179,4 +180,3 @@ void drm_core_reclaim_buffers( struct file *filp )
}
}
EXPORT_SYMBOL(drm_core_reclaim_buffers);


@ -37,20 +37,20 @@
/** No-op. */
int drm_adddraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_draw_t draw;
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
if (copy_to_user((drm_draw_t __user *)arg, &draw, sizeof(draw)))
if (copy_to_user((drm_draw_t __user *) arg, &draw, sizeof(draw)))
return -EFAULT;
return 0;
}
/** No-op. */
int drm_rmdraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
return 0; /* NOOP */
}


@ -54,7 +54,7 @@
#include "drmP.h"
#include "drm_core.h"
static void __exit drm_cleanup( drm_device_t *dev );
static void __exit drm_cleanup(drm_device_t * dev);
#ifndef MODULE
/** Use an additional macro to avoid preprocessor troubles */
@ -64,80 +64,81 @@ static void __exit drm_cleanup( drm_device_t *dev );
* boot-loader (e.g., LILO). It calls the insmod option routine,
* parse_options().
*/
static int __init drm_options( char *str )
static int __init drm_options(char *str)
{
drm_parse_options( str );
drm_parse_options(str);
return 1;
}
__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
__setup(DRIVER_NAME "=", DRM_OPTIONS_FUNC);
#undef DRM_OPTIONS_FUNC
#endif
int drm_fb_loaded = 0;
/** Ioctl table */
drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { drm_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_by_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { drm_getmap, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { drm_getclient, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { drm_getstats, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = { drm_setversion, 0, 1 },
drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0, 0},
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0, 0},
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0, 0},
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, 0, 1},
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0, 0},
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0, 0},
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0, 0},
[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, 0, 1},
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_noop, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { drm_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { drm_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_noop, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { drm_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { drm_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, 1, 0},
/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, 1, 1},
#if __OS_HAS_AGP
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info, 1, 0},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1},
#endif
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { drm_sg_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { drm_sg_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, 1, 1},
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { drm_wait_vblank, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0, 0},
};
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls )
@ -151,7 +152,7 @@ drm_ioctl_desc_t drm_ioctls[] = {
*
* \sa drm_device and setup().
*/
int drm_takedown( drm_device_t *dev )
int drm_takedown(drm_device_t * dev)
{
drm_magic_entry_t *pt, *next;
drm_map_t *map;
@ -160,73 +161,75 @@ int drm_takedown( drm_device_t *dev )
drm_vma_entry_t *vma, *vma_next;
int i;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
if (dev->fn_tbl->pretakedown)
dev->fn_tbl->pretakedown(dev);
if ( dev->irq_enabled ) drm_irq_uninstall( dev );
if (dev->irq_enabled)
drm_irq_uninstall(dev);
down( &dev->struct_sem );
del_timer( &dev->timer );
down(&dev->struct_sem);
del_timer(&dev->timer);
if ( dev->devname ) {
drm_free( dev->devname, strlen( dev->devname ) + 1,
DRM_MEM_DRIVER );
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname) + 1,
DRM_MEM_DRIVER);
dev->devname = NULL;
}
if ( dev->unique ) {
drm_free( dev->unique, strlen( dev->unique ) + 1,
DRM_MEM_DRIVER );
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free( pt, sizeof(*pt), DRM_MEM_MAGIC );
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
/* Clear AGP information */
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
if ( entry->bound ) drm_unbind_agp( entry->memory );
drm_free_agp( entry->memory, entry->pages );
drm_free( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
if (entry->bound)
drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
if ( dev->agp->acquired ) drm_agp_do_release();
if (dev->agp->acquired)
drm_agp_do_release();
dev->agp->acquired = 0;
dev->agp->enabled = 0;
dev->agp->enabled = 0;
}
/* Clear vma list (only built for debugging) */
if ( dev->vmalist ) {
for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free( vma, sizeof(*vma), DRM_MEM_VMAS );
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
if( dev->maplist ) {
list_for_each_safe( list, list_next, &dev->maplist->head ) {
r_list = (drm_map_list_t *)list;
if (dev->maplist) {
list_for_each_safe(list, list_next, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
if ( ( map = r_list->map ) ) {
switch ( map->type ) {
if ((map = r_list->map)) {
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
continue;
@ -242,7 +245,8 @@ int drm_takedown( drm_device_t *dev )
break;
case _DRM_SCATTER_GATHER:
/* Handle it */
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
if (drm_core_check_feature
(dev, DRIVER_SG) && dev->sg) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
@ -250,38 +254,37 @@ int drm_takedown( drm_device_t *dev )
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
list_del( list );
list_del(list);
drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
}
}
}
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
for ( i = 0 ; i < dev->queue_count ; i++ ) {
for (i = 0; i < dev->queue_count; i++) {
if ( dev->queuelist[i] ) {
drm_free( dev->queuelist[i],
sizeof(*dev->queuelist[0]),
DRM_MEM_QUEUES );
if (dev->queuelist[i]) {
drm_free(dev->queuelist[i],
sizeof(*dev->queuelist[0]),
DRM_MEM_QUEUES);
dev->queuelist[i] = NULL;
}
}
drm_free( dev->queuelist,
dev->queue_slots * sizeof(*dev->queuelist),
DRM_MEM_QUEUES );
drm_free(dev->queuelist,
dev->queue_slots * sizeof(*dev->queuelist),
DRM_MEM_QUEUES);
dev->queuelist = NULL;
}
dev->queue_count = 0;
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
drm_dma_takedown( dev );
drm_dma_takedown(dev);
if ( dev->lock.hw_lock ) {
dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
if (dev->lock.hw_lock) {
dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.filp = NULL;
wake_up_interruptible( &dev->lock.lock_queue );
wake_up_interruptible(&dev->lock.lock_queue);
}
up( &dev->struct_sem );
up(&dev->struct_sem);
return 0;
}
@ -300,7 +303,7 @@ EXPORT_SYMBOL(drm_cleanup_pci);
#ifdef MODULE
static char *drm_opts = NULL;
#endif
MODULE_PARM( drm_opts, "s" );
MODULE_PARM(drm_opts, "s");
/**
* Module initialization. Called via init_module at module load time, or via
@ -315,29 +318,34 @@ MODULE_PARM( drm_opts, "s" );
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int __devinit drm_init( struct pci_driver *driver, struct pci_device_id* pciidlist, struct drm_driver_fn *driver_fn)
int __devinit drm_init(struct pci_driver *driver,
struct pci_device_id *pciidlist,
struct drm_driver_fn *driver_fn)
{
struct pci_dev *pdev;
struct pci_device_id *pid;
int i;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
#ifdef MODULE
drm_parse_options( drm_opts );
drm_parse_options(drm_opts);
#endif
drm_mem_init();
for (i=0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
pid = &pciidlist[i];
pdev = NULL;
/* pass back in pdev to account for multiple identical cards */
while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) {
while ((pdev =
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev))) {
/* is there already a driver loaded, or (short circuit saves work) */
/* does something like VesaFB have control of the memory region? */
if (pci_dev_driver(pdev) || pci_request_regions(pdev, "DRM scan")) {
if (pci_dev_driver(pdev)
|| pci_request_regions(pdev, "DRM scan")) {
/* go into stealth mode */
drm_fb_loaded = 1;
pci_dev_put(pdev);
@ -351,12 +359,15 @@ int __devinit drm_init( struct pci_driver *driver, struct pci_device_id* pciidli
if (drm_fb_loaded == 0)
pci_register_driver(driver);
else {
for (i=0; pciidlist[i].vendor != 0; i++) {
for (i = 0; pciidlist[i].vendor != 0; i++) {
pid = &pciidlist[i];
pdev = NULL;
/* pass back in pdev to account for multiple identical cards */
while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) {
while ((pdev =
pci_get_subsys(pid->vendor, pid->device,
pid->subvendor, pid->subdevice,
pdev))) {
/* stealth mode requires a manual probe */
drm_probe(pdev, &pciidlist[i], driver_fn);
}
@ -374,13 +385,13 @@ EXPORT_SYMBOL(drm_init);
*
* \sa drm_init().
*/
static void __exit drm_cleanup( drm_device_t *dev )
static void __exit drm_cleanup(drm_device_t * dev)
{
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list, *list_next;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
if (!dev) {
DRM_ERROR("cleanup called no dev\n");
return;
@ -388,24 +399,29 @@ static void __exit drm_cleanup( drm_device_t *dev )
drm_takedown(dev);
if( dev->maplist ) {
list_for_each_safe( list, list_next, &dev->maplist->head ) {
r_list = (drm_map_list_t *)list;
if (dev->maplist) {
list_for_each_safe(list, list_next, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
if ( ( map = r_list->map ) ) {
switch ( map->type ) {
if ((map = r_list->map)) {
switch (map->type) {
case _DRM_REGISTERS:
drm_ioremapfree( map->handle, map->size, dev );
drm_ioremapfree(map->handle, map->size,
dev);
break;
case _DRM_FRAME_BUFFER:
if ( drm_core_has_MTRR(dev)) {
if ( map->mtrr >= 0 ) {
if (drm_core_has_MTRR(dev)) {
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del( map->mtrr,
map->offset,
map->size );
DRM_DEBUG( "mtrr_del=%d\n", retcode );
retcode =
mtrr_del(map->mtrr,
map->
offset,
map->size);
DRM_DEBUG
("mtrr_del=%d\n",
retcode);
}
}
break;
@ -418,43 +434,44 @@ static void __exit drm_cleanup( drm_device_t *dev )
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
list_del( list );
list_del(list);
drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
}
}
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
}
if (drm_fb_loaded==0)
}
if (drm_fb_loaded == 0)
pci_disable_device(dev->pdev);
drm_ctxbitmap_cleanup( dev );
drm_ctxbitmap_cleanup(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp && dev->agp->agp_mtrr >= 0) {
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
&& dev->agp->agp_mtrr >= 0) {
int retval;
retval = mtrr_del( dev->agp->agp_mtrr,
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size*1024*1024 );
DRM_DEBUG( "mtrr_del=%d\n", retval );
retval = mtrr_del(dev->agp->agp_mtrr,
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size * 1024 * 1024);
DRM_DEBUG("mtrr_del=%d\n", retval);
}
if (drm_core_has_AGP(dev) && dev->agp ) {
drm_free( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
if (drm_core_has_AGP(dev) && dev->agp) {
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
if (dev->fn_tbl->postcleanup)
dev->fn_tbl->postcleanup(dev);
if ( drm_put_minor(dev) )
DRM_ERROR( "Cannot unload module\n" );
if (drm_put_minor(dev))
DRM_ERROR("Cannot unload module\n");
}
void __exit drm_exit (struct pci_driver *driver)
void __exit drm_exit(struct pci_driver *driver)
{
int i;
drm_device_t *dev;
drm_minor_t *minor;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
if (drm_fb_loaded) {
for (i = 0; i < cards_limit; i++) {
minor = &drm_minors[i];
@ -469,24 +486,24 @@ void __exit drm_exit (struct pci_driver *driver)
}
} else
pci_unregister_driver(driver);
DRM_INFO( "Module unloaded\n" );
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_exit);
/** File operations structure */
static struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
.open = drm_stub_open
.open = drm_stub_open
};
static int __init drm_core_init(void)
{
int ret = -ENOMEM;
cards_limit = (cards_limit < DRM_MAX_MINOR + 1 ? cards_limit : DRM_MAX_MINOR + 1);
drm_minors = drm_calloc(cards_limit,
sizeof(*drm_minors), DRM_MEM_STUB);
if(!drm_minors)
cards_limit =
(cards_limit < DRM_MAX_MINOR + 1 ? cards_limit : DRM_MAX_MINOR + 1);
drm_minors = drm_calloc(cards_limit, sizeof(*drm_minors), DRM_MEM_STUB);
if (!drm_minors)
goto err_p1;
if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@ -494,7 +511,7 @@ static int __init drm_core_init(void)
drm_class = drm_sysfs_create(THIS_MODULE, "drm");
if (IS_ERR(drm_class)) {
printk (KERN_ERR "DRM: Error creating drm class.\n");
printk(KERN_ERR "DRM: Error creating drm class.\n");
ret = PTR_ERR(drm_class);
goto err_p2;
}
@ -505,26 +522,22 @@ static int __init drm_core_init(void)
ret = -1;
goto err_p3;
}
drm_agp = (drm_agp_t *)inter_module_get("drm_agp");
drm_agp = (drm_agp_t *) inter_module_get("drm_agp");
DRM_INFO( "Initialized %s %d.%d.%d %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE
);
DRM_INFO("Initialized %s %d.%d.%d %s\n",
DRIVER_NAME,
DRIVER_MAJOR, DRIVER_MINOR, DRIVER_PATCHLEVEL, DRIVER_DATE);
return 0;
err_p3:
err_p3:
drm_sysfs_destroy(drm_class);
err_p2:
err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_minors, sizeof(*drm_minors) * cards_limit, DRM_MEM_STUB);
err_p1:
err_p1:
return ret;
}
static void __exit drm_core_exit (void)
static void __exit drm_core_exit(void)
{
if (drm_agp)
inter_module_put("drm_agp");
@ -534,13 +547,11 @@ static void __exit drm_core_exit (void)
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_minors, sizeof(*drm_minors) *
cards_limit, DRM_MEM_STUB);
drm_free(drm_minors, sizeof(*drm_minors) * cards_limit, DRM_MEM_STUB);
}
module_init( drm_core_init );
module_exit( drm_core_exit );
module_init(drm_core_init);
module_exit(drm_core_exit);
/**
* Get version information
@ -553,8 +564,8 @@ module_exit( drm_core_exit );
*
* Fills in the version information in \p arg.
*/
int drm_version( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_version(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
@ -562,14 +573,14 @@ int drm_version( struct inode *inode, struct file *filp,
drm_version_t version;
int ret;
if ( copy_from_user( &version, argp, sizeof(version) ) )
if (copy_from_user(&version, argp, sizeof(version)))
return -EFAULT;
/* version is a required function to return the personality module version */
if ((ret = dev->fn_tbl->version(&version)))
return ret;
if ( copy_to_user( argp, &version, sizeof(version) ) )
if (copy_to_user(argp, &version, sizeof(version)))
return -EFAULT;
return 0;
}
@ -586,8 +597,8 @@ int drm_version( struct inode *inode, struct file *filp,
* Looks up the ioctl function in the ::ioctls table, checking for root
* privileges if so required, and dispatches to the respective function.
*/
int drm_ioctl( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
@ -596,38 +607,39 @@ int drm_ioctl( struct inode *inode, struct file *filp,
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode = -EINVAL;
atomic_inc( &dev->ioctl_count );
atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++priv->ioctl_count;
DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
current->pid, cmd, nr, (long)old_encode_dev(dev->device),
priv->authenticated );
DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
current->pid, cmd, nr, (long)old_encode_dev(dev->device),
priv->authenticated);
if (nr < DRIVER_IOCTL_COUNT)
ioctl = &drm_ioctls[nr];
else if ((nr >= DRM_COMMAND_BASE) || (nr < DRM_COMMAND_BASE + dev->fn_tbl->num_ioctls))
else if ((nr >= DRM_COMMAND_BASE)
|| (nr < DRM_COMMAND_BASE + dev->fn_tbl->num_ioctls))
ioctl = &dev->fn_tbl->ioctls[nr - DRM_COMMAND_BASE];
else
goto err_i1;
func = ioctl->func;
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->fn_tbl->dma_ioctl) /* Local override? */
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->fn_tbl->dma_ioctl) /* Local override? */
func = dev->fn_tbl->dma_ioctl;
if ( !func ) {
DRM_DEBUG( "no function\n" );
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
( ioctl->auth_needed && !priv->authenticated ) ) {
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) ||
(ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = func( inode, filp, cmd, arg );
retcode = func(inode, filp, cmd, arg);
}
err_i1:
atomic_dec( &dev->ioctl_count );
if (retcode) DRM_DEBUG( "ret = %x\n", retcode);
err_i1:
atomic_dec(&dev->ioctl_count);
if (retcode)
DRM_DEBUG("ret = %x\n", retcode);
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
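The dispatch logic above (look the command number up in a table, check the entry's permission flags, then call through its function pointer) can be sketched in plain userspace C. Everything below is illustrative only: the command numbers, handler names and error codes are invented stand-ins for drm_ioctls[], root_only/auth_needed and the real errno values.

#include <stdio.h>
#include <stdbool.h>

typedef int (*handler_fn)(unsigned long arg);

struct cmd_entry {
        handler_fn func;        /* handler to dispatch to */
        bool root_only;         /* entry requires elevated privileges */
};

static int do_version(unsigned long arg) { printf("version %lu\n", arg); return 0; }
static int do_reset(unsigned long arg)   { printf("reset %lu\n", arg);   return 0; }

static const struct cmd_entry cmd_table[] = {
        [0] = { do_version, false },
        [1] = { do_reset,   true  },
};

static int dispatch(unsigned int nr, unsigned long arg, bool is_root)
{
        if (nr >= sizeof(cmd_table) / sizeof(cmd_table[0]) || !cmd_table[nr].func)
                return -1;              /* the kernel path returns -EINVAL here */
        if (cmd_table[nr].root_only && !is_root)
                return -2;              /* ... and -EACCES here */
        return cmd_table[nr].func(arg);
}

int main(void)
{
        dispatch(0, 42, false);                          /* allowed */
        printf("reset as non-root: %d\n", dispatch(1, 0, false));
        return 0;
}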


@ -37,42 +37,41 @@
#include "drmP.h"
#include <linux/poll.h>
static int drm_setup( drm_device_t *dev )
static int drm_setup(drm_device_t * dev)
{
int i;
if (dev->fn_tbl->presetup)
dev->fn_tbl->presetup(dev);
atomic_set( &dev->ioctl_count, 0 );
atomic_set( &dev->vma_count, 0 );
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set( &dev->buf_alloc, 0 );
atomic_set(&dev->buf_alloc, 0);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
{
i = drm_dma_setup( dev );
if ( i < 0 )
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
i = drm_dma_setup(dev);
if (i < 0)
return i;
}
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
atomic_set( &dev->counts[i], 0 );
for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
DRM_MEM_CTXLIST);
if(dev->ctxlist == NULL) return -ENOMEM;
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
if (dev->ctxlist == NULL)
return -ENOMEM;
memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
INIT_LIST_HEAD(&dev->ctxlist->head);
dev->vmalist = NULL;
dev->sigdata.lock = dev->lock.hw_lock = NULL;
init_waitqueue_head( &dev->lock.lock_queue );
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
@ -84,7 +83,7 @@ static int drm_setup( drm_device_t *dev )
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
init_waitqueue_head( &dev->context_wait );
init_waitqueue_head(&dev->context_wait);
dev->if_version = 0;
dev->ctx_start = 0;
@ -94,14 +93,14 @@ static int drm_setup( drm_device_t *dev )
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
dev->buf_async = NULL;
init_waitqueue_head( &dev->buf_readers );
init_waitqueue_head( &dev->buf_writers );
init_waitqueue_head(&dev->buf_readers);
init_waitqueue_head(&dev->buf_writers);
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
/*
* The kernel's context could be created here, but is now created
* in drm_dma_enqueue. This is more resource-efficient for
* in drm_dma_enqueue. This is more resource-efficient for
* hardware that does not do DMA, but may mean that
* drm_select_queue fails between the time the interrupt is
* initialized and the time the queues are initialized.
@ -112,7 +111,6 @@ static int drm_setup( drm_device_t *dev )
return 0;
}
/**
* Open file.
*
@ -124,7 +122,7 @@ static int drm_setup( drm_device_t *dev )
* increments the device open count. If the open count was previously zero,
* i.e., it's the first time the device is opened, then calls setup().
*/
int drm_open( struct inode *inode, struct file *filp )
int drm_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = NULL;
int minor = iminor(inode);
@ -137,15 +135,15 @@ int drm_open( struct inode *inode, struct file *filp )
if (!dev)
return -ENODEV;
retcode = drm_open_helper( inode, filp, dev );
if ( !retcode ) {
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
spin_lock( &dev->count_lock );
if ( !dev->open_count++ ) {
spin_unlock( &dev->count_lock );
return drm_setup( dev );
retcode = drm_open_helper(inode, filp, dev);
if (!retcode) {
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return drm_setup(dev);
}
spin_unlock( &dev->count_lock );
spin_unlock(&dev->count_lock);
}
return retcode;
@ -199,47 +197,50 @@ int drm_stub_open(struct inode *inode, struct file *filp)
* Creates and initializes a drm_file structure for the file private data in \p
* filp and adds it into the doubly linked list in \p dev.
*/
int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev)
int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t * dev)
{
int minor = iminor(inode);
drm_file_t *priv;
int minor = iminor(inode);
drm_file_t *priv;
int ret;
if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid()) return -EINVAL;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
if(!priv) return -ENOMEM;
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
if (!priv)
return -ENOMEM;
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->uid = current->euid;
priv->pid = current->pid;
priv->minor = minor;
priv->dev = dev;
priv->ioctl_count = 0;
filp->private_data = priv;
priv->uid = current->euid;
priv->pid = current->pid;
priv->minor = minor;
priv->dev = dev;
priv->ioctl_count = 0;
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
priv->lock_count = 0;
if (dev->fn_tbl->open_helper) {
ret=dev->fn_tbl->open_helper(dev, priv);
ret = dev->fn_tbl->open_helper(dev, priv);
if (ret < 0)
goto out_free;
}
down(&dev->struct_sem);
if (!dev->file_last) {
priv->next = NULL;
priv->prev = NULL;
priv->next = NULL;
priv->prev = NULL;
dev->file_first = priv;
dev->file_last = priv;
dev->file_last = priv;
} else {
priv->next = NULL;
priv->prev = dev->file_last;
priv->next = NULL;
priv->prev = dev->file_last;
dev->file_last->next = priv;
dev->file_last = priv;
dev->file_last = priv;
}
up(&dev->struct_sem);
@ -250,31 +251,35 @@ int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev)
if (!dev->hose) {
struct pci_dev *pci_dev;
pci_dev = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
if (pci_dev) dev->hose = pci_dev->sysdata;
if (pci_dev)
dev->hose = pci_dev->sysdata;
if (!dev->hose) {
struct pci_bus *b = pci_bus_b(pci_root_buses.next);
if (b) dev->hose = b->sysdata;
if (b)
dev->hose = b->sysdata;
}
}
#endif
return 0;
out_free:
out_free:
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
filp->private_data=NULL;
filp->private_data = NULL;
return ret;
}
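The open-coded list handling in drm_open_helper() (file_first/file_last head and tail pointers, prev/next links in each entry) follows the usual tail-append pattern. A minimal userspace sketch of that pattern, with made-up struct and field names:

#include <stdio.h>

struct file_node {
        int pid;
        struct file_node *prev, *next;
};

struct dev_list {
        struct file_node *first, *last;
};

/* Append at the tail, mirroring the file_first/file_last bookkeeping above. */
static void append_node(struct dev_list *l, struct file_node *n)
{
        n->next = NULL;
        if (!l->last) {                 /* empty list */
                n->prev = NULL;
                l->first = n;
                l->last = n;
        } else {                        /* link after the current tail */
                n->prev = l->last;
                l->last->next = n;
                l->last = n;
        }
}

int main(void)
{
        struct dev_list list = { NULL, NULL };
        struct file_node a = { .pid = 100 }, b = { .pid = 200 };

        append_node(&list, &a);
        append_node(&list, &b);
        for (struct file_node *p = list.first; p; p = p->next)
                printf("pid %d\n", p->pid);
        return 0;
}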
/** No-op. */
int drm_fasync(int fd, struct file *filp, int on)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode;
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, (long)old_encode_dev(dev->device));
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
(long)old_encode_dev(dev->device));
retcode = fasync_helper(fd, filp, on, &dev->buf_async);
if (retcode < 0) return retcode;
if (retcode < 0)
return retcode;
return 0;
}
EXPORT_SYMBOL(drm_fasync);
@ -291,7 +296,7 @@ EXPORT_SYMBOL(drm_fasync);
* data from its list and frees it. Decreases the open count and, if it reaches
* zero, calls takedown().
*/
int drm_release( struct inode *inode, struct file *filp )
int drm_release(struct inode *inode, struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev;
@ -300,7 +305,7 @@ int drm_release( struct inode *inode, struct file *filp )
lock_kernel();
dev = priv->dev;
DRM_DEBUG( "open_count = %d\n", dev->open_count );
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (dev->fn_tbl->prerelease)
dev->fn_tbl->prerelease(dev, filp);
@ -309,134 +314,133 @@ int drm_release( struct inode *inode, struct file *filp )
* Begin inline drm_release
*/
DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
current->pid, (long)old_encode_dev(dev->device), dev->open_count );
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
current->pid, (long)old_encode_dev(dev->device),
dev->open_count);
if ( priv->lock_count && dev->lock.hw_lock &&
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
dev->lock.filp == filp ) {
DRM_DEBUG( "File %p released, freeing lock for context %d\n",
filp,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
if (priv->lock_count && dev->lock.hw_lock &&
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
dev->lock.filp == filp) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
if (dev->fn_tbl->release)
dev->fn_tbl->release(dev, filp);
drm_lock_free( dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
}
else if ( dev->fn_tbl->release && priv->lock_count && dev->lock.hw_lock ) {
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
} else if (dev->fn_tbl->release && priv->lock_count
&& dev->lock.hw_lock) {
/* The lock is required to reclaim buffers */
DECLARE_WAITQUEUE( entry, current );
DECLARE_WAITQUEUE(entry, current);
add_wait_queue( &dev->lock.lock_queue, &entry );
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if ( !dev->lock.hw_lock ) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
retcode = -EINTR;
break;
}
if ( drm_lock_take( &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT ) ) {
dev->lock.filp = filp;
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
/* Contention */
schedule();
if ( signal_pending( current ) ) {
if (signal_pending(current)) {
retcode = -ERESTARTSYS;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue( &dev->lock.lock_queue, &entry );
if( !retcode ) {
remove_wait_queue(&dev->lock.lock_queue, &entry);
if (!retcode) {
if (dev->fn_tbl->release)
dev->fn_tbl->release(dev, filp);
drm_lock_free( dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT );
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
}
}
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
{
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
dev->fn_tbl->reclaim_buffers(filp);
}
drm_fasync( -1, filp, 0 );
drm_fasync(-1, filp, 0);
down( &dev->ctxlist_sem );
if ( !list_empty( &dev->ctxlist->head ) ) {
down(&dev->ctxlist_sem);
if (!list_empty(&dev->ctxlist->head)) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
if ( pos->tag == priv &&
pos->handle != DRM_KERNEL_CONTEXT ) {
list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
if (pos->tag == priv &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->fn_tbl->context_dtor)
dev->fn_tbl->context_dtor(dev, pos->handle);
dev->fn_tbl->context_dtor(dev,
pos->handle);
drm_ctxbitmap_free( dev, pos->handle );
drm_ctxbitmap_free(dev, pos->handle);
list_del( &pos->head );
drm_free( pos, sizeof(*pos), DRM_MEM_CTXLIST );
list_del(&pos->head);
drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
--dev->ctx_count;
}
}
}
up( &dev->ctxlist_sem );
up(&dev->ctxlist_sem);
down( &dev->struct_sem );
if ( priv->remove_auth_on_close == 1 ) {
down(&dev->struct_sem);
if (priv->remove_auth_on_close == 1) {
drm_file_t *temp = dev->file_first;
while ( temp ) {
while (temp) {
temp->authenticated = 0;
temp = temp->next;
}
}
if ( priv->prev ) {
if (priv->prev) {
priv->prev->next = priv->next;
} else {
dev->file_first = priv->next;
dev->file_first = priv->next;
}
if ( priv->next ) {
if (priv->next) {
priv->next->prev = priv->prev;
} else {
dev->file_last = priv->prev;
dev->file_last = priv->prev;
}
up( &dev->struct_sem );
up(&dev->struct_sem);
if (dev->fn_tbl->free_filp_priv)
dev->fn_tbl->free_filp_priv( dev, priv );
drm_free( priv, sizeof(*priv), DRM_MEM_FILES );
dev->fn_tbl->free_filp_priv(dev, priv);
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
/* ========================================================
* End inline drm_release
*/
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
spin_lock( &dev->count_lock );
if ( !--dev->open_count ) {
if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
DRM_ERROR( "Device busy: %d %d\n",
atomic_read( &dev->ioctl_count ),
dev->blocked );
spin_unlock( &dev->count_lock );
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count), dev->blocked);
spin_unlock(&dev->count_lock);
unlock_kernel();
return -EBUSY;
}
spin_unlock( &dev->count_lock );
spin_unlock(&dev->count_lock);
unlock_kernel();
return drm_takedown( dev );
return drm_takedown(dev);
}
spin_unlock( &dev->count_lock );
spin_unlock(&dev->count_lock);
unlock_kernel();


@ -55,10 +55,14 @@ static void drm_parse_option(char *s)
char *c, *r;
DRM_DEBUG("\"%s\"\n", s);
if (!s || !*s) return;
for (c = s; *c && *c != ':'; c++); /* find : or \0 */
if (*c) r = c + 1; else r = NULL; /* remember remainder */
*c = '\0'; /* terminate */
if (!s || !*s)
return;
for (c = s; *c && *c != ':'; c++) ; /* find : or \0 */
if (*c)
r = c + 1;
else
r = NULL; /* remember remainder */
*c = '\0'; /* terminate */
if (!strcmp(s, "debug")) {
drm_flags |= DRM_FLAG_DEBUG;
DRM_INFO("Debug messages ON\n");
@ -101,14 +105,18 @@ void drm_parse_options(char *s)
{
char *h, *t, *n;
DRM_DEBUG("\"%s\"\n", s ?: "");
if (!s || !*s) return;
DRM_DEBUG("\"%s\"\n", s ? : "");
if (!s || !*s)
return;
for (h = t = n = s; h && *h; h = n) {
for (; *t && *t != ';'; t++); /* find ; or \0 */
if (*t) n = t + 1; else n = NULL; /* remember next */
*t = '\0'; /* terminate */
drm_parse_option(h); /* parse */
for (; *t && *t != ';'; t++) ; /* find ; or \0 */
if (*t)
n = t + 1;
else
n = NULL; /* remember next */
*t = '\0'; /* terminate */
drm_parse_option(h); /* parse */
}
}
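drm_parse_options() splits the module option string on ';' and then splits each piece on ':' before acting on it. A simplified userspace sketch of the same two-level split, using strchr() instead of the open-coded scans; the option strings below are only examples:

#include <stdio.h>
#include <string.h>

/* Handle one "name" or "name:value" option. */
static void parse_option(char *s)
{
        char *value = strchr(s, ':');           /* find ':' if there is one */

        if (value)
                *value++ = '\0';                /* terminate the name */
        printf("option \"%s\", value \"%s\"\n", s, value ? value : "(none)");
}

/* Split a ';'-separated option string and parse each piece. */
static void parse_options(char *s)
{
        char *opt, *next;

        if (!s || !*s)
                return;
        for (opt = s; opt; opt = next) {
                next = strchr(opt, ';');        /* find the end of this option */
                if (next)
                        *next++ = '\0';         /* terminate and step past ';' */
                if (*opt)
                        parse_option(opt);
        }
}

int main(void)
{
        char opts[] = "debug;vblankoff:1;quiet";        /* example string */

        parse_options(opts);
        return 0;
}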
@ -120,10 +128,11 @@ void drm_parse_options(char *s)
int drm_cpu_valid(void)
{
#if defined(__i386__)
if (boot_cpu_data.x86 == 3) return 0; /* No cmpxchg on a 386 */
if (boot_cpu_data.x86 == 3)
return 0; /* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
return 0; /* No cmpxchg before v9 sparc. */
return 0; /* No cmpxchg before v9 sparc. */
#endif
return 1;
}


@ -50,12 +50,12 @@
* Copies the bus id from drm_device::unique into user space.
*/
int drm_getunique(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_unique_t __user *argp = (void __user *)arg;
drm_unique_t u;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_unique_t __user *argp = (void __user *)arg;
drm_unique_t u;
if (copy_from_user(&u, argp, sizeof(u)))
return -EFAULT;
@ -84,30 +84,33 @@ int drm_getunique(struct inode *inode, struct file *filp,
* version 1.1 or greater.
*/
int drm_setunique(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_unique_t u;
int domain, bus, slot, func, ret;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_unique_t u;
int domain, bus, slot, func, ret;
if (dev->unique_len || dev->unique) return -EBUSY;
if (dev->unique_len || dev->unique)
return -EBUSY;
if (copy_from_user(&u, (drm_unique_t __user *)arg, sizeof(u)))
if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u)))
return -EFAULT;
if (!u.unique_len || u.unique_len > 1024) return -EINVAL;
if (!u.unique_len || u.unique_len > 1024)
return -EINVAL;
dev->unique_len = u.unique_len;
dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
if(!dev->unique) return -ENOMEM;
dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
if (!dev->unique)
return -ENOMEM;
if (copy_from_user(dev->unique, u.unique, dev->unique_len))
return -EFAULT;
dev->unique[dev->unique_len] = '\0';
dev->devname = drm_alloc(strlen(dev->name) + strlen(dev->unique) + 2,
DRM_MEM_DRIVER);
DRM_MEM_DRIVER);
if (!dev->devname)
return -ENOMEM;
@ -124,15 +127,13 @@ int drm_setunique(struct inode *inode, struct file *filp,
if ((domain != dev->pci_domain) ||
(bus != dev->pci_bus) ||
(slot != dev->pci_slot) ||
(func != dev->pci_func))
(slot != dev->pci_slot) || (func != dev->pci_func))
return -EINVAL;
return 0;
}
static int
drm_set_busid(drm_device_t *dev)
static int drm_set_busid(drm_device_t * dev)
{
if (dev->unique != NULL)
return EBUSY;
@ -143,10 +144,10 @@ drm_set_busid(drm_device_t *dev)
return ENOMEM;
snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
dev->devname = drm_alloc(strlen(dev->name) + dev->unique_len + 2,
DRM_MEM_DRIVER);
DRM_MEM_DRIVER);
if (dev->devname == NULL)
return ENOMEM;
@ -155,7 +156,6 @@ drm_set_busid(drm_device_t *dev)
return 0;
}
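drm_set_busid() builds dev->unique from the PCI coordinates with the "pci:%04x:%02x:%02x.%d" format shown above. A small userspace sketch of formatting such a string and parsing it back; the device numbers are example values only:

#include <stdio.h>

int main(void)
{
        unsigned int domain = 0, bus = 1, slot = 0;     /* example values */
        int func = 0;
        char busid[32];
        unsigned int d, b, s;
        int f;

        /* Same format drm_set_busid() uses for dev->unique. */
        snprintf(busid, sizeof(busid), "pci:%04x:%02x:%02x.%d",
                 domain, bus, slot, func);
        printf("busid = \"%s\"\n", busid);              /* pci:0000:01:00.0 */

        /* Parse it back; a successful scan yields all four fields. */
        if (sscanf(busid, "pci:%x:%x:%x.%d", &d, &b, &s, &f) == 4)
                printf("domain=%u bus=%u slot=%u func=%d\n", d, b, s, f);
        return 0;
}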
/**
* Get mapping information.
*
@ -169,17 +169,17 @@ drm_set_busid(drm_device_t *dev)
* Searches for the mapping with the specified offset and copies its information
* into userspace
*/
int drm_getmap( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_getmap(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t __user *argp = (void __user *)arg;
drm_map_t map;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t __user *argp = (void __user *)arg;
drm_map_t map;
drm_map_list_t *r_list = NULL;
struct list_head *list;
int idx;
int i;
int idx;
int i;
if (copy_from_user(&map, argp, sizeof(map)))
return -EFAULT;
@ -193,26 +193,27 @@ int drm_getmap( struct inode *inode, struct file *filp,
i = 0;
list_for_each(list, &dev->maplist->head) {
if(i == idx) {
if (i == idx) {
r_list = list_entry(list, drm_map_list_t, head);
break;
}
i++;
}
if(!r_list || !r_list->map) {
if (!r_list || !r_list->map) {
up(&dev->struct_sem);
return -EINVAL;
}
map.offset = r_list->map->offset;
map.size = r_list->map->size;
map.type = r_list->map->type;
map.flags = r_list->map->flags;
map.size = r_list->map->size;
map.type = r_list->map->type;
map.flags = r_list->map->flags;
map.handle = r_list->map->handle;
map.mtrr = r_list->map->mtrr;
map.mtrr = r_list->map->mtrr;
up(&dev->struct_sem);
if (copy_to_user(argp, &map, sizeof(map))) return -EFAULT;
if (copy_to_user(argp, &map, sizeof(map)))
return -EFAULT;
return 0;
}
@ -229,33 +230,32 @@ int drm_getmap( struct inode *inode, struct file *filp,
* Searches for the client with the specified index and copies its information
* into userspace
*/
int drm_getclient( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_getclient(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_client_t __user *argp = (void __user *)arg;
drm_client_t client;
drm_file_t *pt;
int idx;
int i;
drm_file_t *pt;
int idx;
int i;
if (copy_from_user(&client, argp, sizeof(client)))
return -EFAULT;
idx = client.idx;
down(&dev->struct_sem);
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next)
;
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
if (!pt) {
up(&dev->struct_sem);
return -EINVAL;
}
client.auth = pt->authenticated;
client.pid = pt->pid;
client.uid = pt->uid;
client.auth = pt->authenticated;
client.pid = pt->pid;
client.uid = pt->uid;
client.magic = pt->magic;
client.iocs = pt->ioctl_count;
client.iocs = pt->ioctl_count;
up(&dev->struct_sem);
if (copy_to_user(argp, &client, sizeof(client)))
@ -273,13 +273,13 @@ int drm_getclient( struct inode *inode, struct file *filp,
*
* \return zero on success or a negative number on failure.
*/
int drm_getstats( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_getstats(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_stats_t stats;
int i;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_stats_t stats;
int i;
memset(&stats, 0, sizeof(stats));
@ -288,18 +288,17 @@ int drm_getstats( struct inode *inode, struct file *filp,
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats.data[i].value
= (dev->lock.hw_lock
? dev->lock.hw_lock->lock : 0);
= (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
else
stats.data[i].value = atomic_read(&dev->counts[i]);
stats.data[i].type = dev->types[i];
stats.data[i].type = dev->types[i];
}
stats.count = dev->counters;
up(&dev->struct_sem);
if (copy_to_user((drm_stats_t __user *)arg, &stats, sizeof(stats)))
if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
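drm_getstats() fills a local drm_stats_t while holding struct_sem, drops the semaphore, and only then copies the snapshot out to userspace. A userspace analogue of that snapshot-then-copy pattern, with a pthread mutex standing in for the kernel semaphore and made-up counter values:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NUM_COUNTERS 4

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static long counters[NUM_COUNTERS] = { 3, 14, 15, 92 };         /* example data */

struct stats_snapshot {
        long data[NUM_COUNTERS];
        int count;
};

/* Fill the snapshot while holding the lock, then hand out the copy. */
static void get_stats(struct stats_snapshot *out)
{
        memset(out, 0, sizeof(*out));
        pthread_mutex_lock(&stats_lock);
        for (int i = 0; i < NUM_COUNTERS; i++)
                out->data[i] = counters[i];
        out->count = NUM_COUNTERS;
        pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
        struct stats_snapshot s;

        get_stats(&s);
        for (int i = 0; i < s.count; i++)
                printf("counter %d = %ld\n", i, s.data[i]);
        return 0;
}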
@ -359,7 +358,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
/** No-op ioctl. */
int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
unsigned long arg)
{
DRM_DEBUG("\n");
return 0;


@ -51,7 +51,7 @@
* to that of the device that this DRM instance is attached to.
*/
int drm_irq_by_busid(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
@ -66,14 +66,12 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp,
if ((p.busnum >> 8) != dev->pci_domain ||
(p.busnum & 0xff) != dev->pci_bus ||
p.devnum != dev->pci_slot ||
p.funcnum != dev->pci_func)
p.devnum != dev->pci_slot || p.funcnum != dev->pci_func)
return -EINVAL;
p.irq = dev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
p.busnum, p.devnum, p.funcnum, p.irq);
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq);
if (copy_to_user(argp, &p, sizeof(p)))
return -EFAULT;
return 0;
@ -88,33 +86,33 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp,
* \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
* before and after the installation.
*/
int drm_irq_install( drm_device_t *dev )
int drm_irq_install(drm_device_t * dev)
{
int ret;
unsigned long sh_flags=0;
unsigned long sh_flags = 0;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
if ( dev->irq == 0 )
if (dev->irq == 0)
return -EINVAL;
down( &dev->struct_sem );
down(&dev->struct_sem);
/* Driver must have been initialized */
if ( !dev->dev_private ) {
up( &dev->struct_sem );
if (!dev->dev_private) {
up(&dev->struct_sem);
return -EINVAL;
}
if ( dev->irq_enabled ) {
up( &dev->struct_sem );
if (dev->irq_enabled) {
up(&dev->struct_sem);
return -EBUSY;
}
dev->irq_enabled = 1;
up( &dev->struct_sem );
up(&dev->struct_sem);
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
dev->dma->next_buffer = NULL;
dev->dma->next_queue = NULL;
@ -123,30 +121,30 @@ int drm_irq_install( drm_device_t *dev )
if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init( &dev->vbl_lock );
spin_lock_init(&dev->vbl_lock);
INIT_LIST_HEAD( &dev->vbl_sigs.head );
INIT_LIST_HEAD(&dev->vbl_sigs.head);
dev->vbl_pending = 0;
}
/* Before installing handler */
/* Before installing handler */
dev->fn_tbl->irq_preinstall(dev);
/* Install handler */
/* Install handler */
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = SA_SHIRQ;
ret = request_irq( dev->irq, dev->fn_tbl->irq_handler,
sh_flags, dev->devname, dev );
if ( ret < 0 ) {
down( &dev->struct_sem );
ret = request_irq(dev->irq, dev->fn_tbl->irq_handler,
sh_flags, dev->devname, dev);
if (ret < 0) {
down(&dev->struct_sem);
dev->irq_enabled = 0;
up( &dev->struct_sem );
up(&dev->struct_sem);
return ret;
}
/* After installing handler */
/* After installing handler */
dev->fn_tbl->irq_postinstall(dev);
return 0;
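drm_irq_install() takes struct_sem, refuses with -EBUSY if irq_enabled is already set, marks it enabled, and drops the semaphore before doing the real request_irq(). A userspace sketch of that check-and-claim-under-lock idiom; the names and the pthread mutex are illustrative substitutes:

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int handler_installed;           /* plays the role of dev->irq_enabled */

/* Returns 0 on success, -EBUSY if a handler is already installed. */
static int install_handler(void)
{
        pthread_mutex_lock(&state_lock);
        if (handler_installed) {
                pthread_mutex_unlock(&state_lock);
                return -EBUSY;
        }
        handler_installed = 1;          /* claim the slot before the real work */
        pthread_mutex_unlock(&state_lock);

        /* the expensive setup (request_irq() in the driver) would happen here,
         * outside the lock; on failure the flag would be cleared again */
        return 0;
}

int main(void)
{
        printf("first install:  %d\n", install_handler());      /* 0 */
        printf("second install: %d\n", install_handler());      /* -EBUSY */
        return 0;
}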
@ -159,26 +157,26 @@ int drm_irq_install( drm_device_t *dev )
*
* Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
*/
int drm_irq_uninstall( drm_device_t *dev )
int drm_irq_uninstall(drm_device_t * dev)
{
int irq_enabled;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
down( &dev->struct_sem );
down(&dev->struct_sem);
irq_enabled = dev->irq_enabled;
dev->irq_enabled = 0;
up( &dev->struct_sem );
up(&dev->struct_sem);
if ( !irq_enabled )
if (!irq_enabled)
return -EINVAL;
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
dev->fn_tbl->irq_uninstall(dev);
free_irq( dev->irq, dev );
free_irq(dev->irq, dev);
return 0;
}
@ -195,8 +193,8 @@ EXPORT_SYMBOL(drm_irq_uninstall);
*
* Calls irq_install() or irq_uninstall() according to \p arg.
*/
int drm_control( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_control(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
@ -204,21 +202,21 @@ int drm_control( struct inode *inode, struct file *filp,
/* if we don't have an irq we fall back for compatibility reasons - this used to be a separate function in drm_dma.h */
if ( copy_from_user( &ctl, (drm_control_t __user *)arg, sizeof(ctl) ) )
if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl)))
return -EFAULT;
switch ( ctl.func ) {
switch (ctl.func) {
case DRM_INST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl.irq != dev->irq)
return -EINVAL;
return drm_irq_install( dev );
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
return drm_irq_uninstall( dev );
return drm_irq_uninstall(dev);
default:
return -EINVAL;
}
@ -243,7 +241,7 @@ int drm_control( struct inode *inode, struct file *filp,
*
* If a signal is not requested, then calls vblank_wait().
*/
int drm_wait_vblank( DRM_IOCTL_ARGS )
int drm_wait_vblank(DRM_IOCTL_ARGS)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
@ -259,11 +257,11 @@ int drm_wait_vblank( DRM_IOCTL_ARGS )
if (!dev->irq)
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL( vblwait, argp, sizeof(vblwait) );
DRM_COPY_FROM_USER_IOCTL(vblwait, argp, sizeof(vblwait));
switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
switch (vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK) {
case _DRM_VBLANK_RELATIVE:
vblwait.request.sequence += atomic_read( &dev->vbl_received );
vblwait.request.sequence += atomic_read(&dev->vbl_received);
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
case _DRM_VBLANK_ABSOLUTE:
break;
@ -273,63 +271,67 @@ int drm_wait_vblank( DRM_IOCTL_ARGS )
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if ( flags & _DRM_VBLANK_SIGNAL ) {
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
drm_vbl_sig_t *vbl_sig;
vblwait.reply.sequence = atomic_read( &dev->vbl_received );
vblwait.reply.sequence = atomic_read(&dev->vbl_received);
spin_lock_irqsave( &dev->vbl_lock, irqflags );
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Check if this task has already scheduled the same signal
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) {
list_for_each_entry(vbl_sig, &dev->vbl_sigs.head, head) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current)
{
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
&& vbl_sig->task == current) {
spin_unlock_irqrestore(&dev->vbl_lock,
irqflags);
goto done;
}
}
if ( dev->vbl_pending >= 100 ) {
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
if (dev->vbl_pending >= 100) {
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return -EBUSY;
}
dev->vbl_pending++;
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if ( !( vbl_sig = drm_alloc( sizeof( drm_vbl_sig_t ), DRM_MEM_DRIVER ) ) ) {
if (!
(vbl_sig =
drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) {
return -ENOMEM;
}
memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
vbl_sig->sequence = vblwait.request.sequence;
vbl_sig->info.si_signo = vblwait.request.signal;
vbl_sig->task = current;
spin_lock_irqsave( &dev->vbl_lock, irqflags );
spin_lock_irqsave(&dev->vbl_lock, irqflags);
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
list_add_tail((struct list_head *)vbl_sig, &dev->vbl_sigs.head);
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
} else {
if (dev->fn_tbl->vblank_wait)
ret = dev->fn_tbl->vblank_wait( dev, &vblwait.request.sequence );
ret =
dev->fn_tbl->vblank_wait(dev,
&vblwait.request.sequence);
do_gettimeofday( &now );
do_gettimeofday(&now);
vblwait.reply.tval_sec = now.tv_sec;
vblwait.reply.tval_usec = now.tv_usec;
}
done:
DRM_COPY_TO_USER_IOCTL( argp, vblwait, sizeof(vblwait) );
done:
DRM_COPY_TO_USER_IOCTL(argp, vblwait, sizeof(vblwait));
return ret;
}
@ -343,31 +345,30 @@ done:
*
* If a signal is not requested, then calls vblank_wait().
*/
void drm_vbl_send_signals( drm_device_t *dev )
void drm_vbl_send_signals(drm_device_t * dev)
{
struct list_head *list, *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
unsigned int vbl_seq = atomic_read(&dev->vbl_received);
unsigned long flags;
spin_lock_irqsave( &dev->vbl_lock, flags );
spin_lock_irqsave(&dev->vbl_lock, flags);
list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) {
vbl_sig = list_entry( list, drm_vbl_sig_t, head );
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
list_for_each_safe(list, tmp, &dev->vbl_sigs.head) {
vbl_sig = list_entry(list, drm_vbl_sig_t, head);
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
send_sig_info(vbl_sig->info.si_signo, &vbl_sig->info,
vbl_sig->task);
list_del( list );
list_del(list);
drm_free( vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER );
drm_free(vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER);
dev->vbl_pending--;
}
}
spin_unlock_irqrestore( &dev->vbl_lock, flags );
spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
EXPORT_SYMBOL(drm_vbl_send_signals);
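The test (vbl_seq - vbl_sig->sequence) <= (1 << 23) relies on unsigned wraparound: a signal is considered due when the counter has caught up with the requested sequence, even if the 32-bit counter wrapped in between. A small worked example, assuming a 32-bit unsigned int:

#include <stdio.h>

/* "Due" means the counter has reached or passed the requested sequence,
 * allowing for the counter wrapping around past 0xffffffff. */
static int vblank_due(unsigned int current_seq, unsigned int requested)
{
        return (current_seq - requested) <= (1u << 23);
}

int main(void)
{
        /* Near the wrap point: 0xfffffffe was requested, counter wrapped to 2;
         * 2u - 0xfffffffeu == 4 in 32-bit unsigned arithmetic, so it is due. */
        printf("%d\n", vblank_due(2u, 0xfffffffeu));    /* 1 */

        /* Requested sequence is still far in the future: not due yet. */
        printf("%d\n", vblank_due(10u, 0x01000000u));   /* 0 */
        return 0;
}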


@ -35,7 +35,6 @@
#include "drmP.h"
/**
* Lock ioctl.
*
@ -47,86 +46,83 @@
*
* Add the current task to the lock wait queue, and attempt to take the lock.
*/
int drm_lock( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE( entry, current );
drm_lock_t lock;
int ret = 0;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE(entry, current);
drm_lock_t lock;
int ret = 0;
++priv->lock_count;
if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
return -EFAULT;
if ( lock.context == DRM_KERNEL_CONTEXT ) {
DRM_ERROR( "Process %d using kernel context %d\n",
current->pid, lock.context );
return -EINVAL;
}
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid,
dev->lock.hw_lock->lock, lock.flags );
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid,
dev->lock.hw_lock->lock, lock.flags);
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
if ( lock.context < 0 )
if (lock.context < 0)
return -EINVAL;
add_wait_queue( &dev->lock.lock_queue, &entry );
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if ( !dev->lock.hw_lock ) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if ( drm_lock_take( &dev->lock.hw_lock->lock,
lock.context ) ) {
dev->lock.filp = filp;
if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
break; /* Got lock */
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
schedule();
if ( signal_pending( current ) ) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue( &dev->lock.lock_queue, &entry );
remove_wait_queue(&dev->lock.lock_queue, &entry);
sigemptyset( &dev->sigmask );
sigaddset( &dev->sigmask, SIGSTOP );
sigaddset( &dev->sigmask, SIGTSTP );
sigaddset( &dev->sigmask, SIGTTIN );
sigaddset( &dev->sigmask, SIGTTOU );
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
sigaddset(&dev->sigmask, SIGTTIN);
sigaddset(&dev->sigmask, SIGTTOU);
dev->sigdata.context = lock.context;
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals( drm_notifier,
&dev->sigdata, &dev->sigmask );
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
if (dev->fn_tbl->dma_ready && (lock.flags & _DRM_LOCK_READY))
dev->fn_tbl->dma_ready(dev);
if ( dev->fn_tbl->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT ))
if (dev->fn_tbl->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT))
return dev->fn_tbl->dma_quiescent(dev);
if ( dev->fn_tbl->kernel_context_switch && dev->last_context != lock.context ) {
if (dev->fn_tbl->kernel_context_switch
&& dev->last_context != lock.context) {
dev->fn_tbl->kernel_context_switch(dev, dev->last_context,
lock.context);
lock.context);
}
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
return ret;
return ret;
}
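drm_lock() loops trying drm_lock_take(), which publishes a new lock word with cmpxchg and either claims the lock or sets the contention bit. A compact userspace sketch of that compare-and-swap step using C11 atomics; the HELD/CONT bit values mirror the DRM flags but are only illustrative here:

#include <stdatomic.h>
#include <stdio.h>

#define LOCK_HELD 0x80000000u           /* mirrors _DRM_LOCK_HELD */
#define LOCK_CONT 0x40000000u           /* mirrors _DRM_LOCK_CONT */

static _Atomic unsigned int hw_lock;    /* 0 means free */

/* Try to take the lock for `context`: read the old word, compute the new
 * one, and publish it only if nobody raced us in between. */
static int lock_take(_Atomic unsigned int *lock, unsigned int context)
{
        unsigned int old, new;

        do {
                old = atomic_load(lock);
                if (old & LOCK_HELD)
                        new = old | LOCK_CONT;          /* mark contention */
                else
                        new = context | LOCK_HELD;      /* claim the lock */
        } while (!atomic_compare_exchange_weak(lock, &old, new));

        return new == (context | LOCK_HELD);            /* 1 if we now hold it */
}

int main(void)
{
        printf("ctx 2 takes lock: %d\n", lock_take(&hw_lock, 2));       /* 1 */
        printf("ctx 3 takes lock: %d\n", lock_take(&hw_lock, 3));       /* 0 */
        return 0;
}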
/**
@ -140,34 +136,33 @@ int drm_lock( struct inode *inode, struct file *filp,
*
* Transfer and free the lock.
*/
int drm_unlock( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_lock_t lock;
if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
return -EFAULT;
if ( lock.context == DRM_KERNEL_CONTEXT ) {
DRM_ERROR( "Process %d using kernel context %d\n",
current->pid, lock.context );
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
if (dev->fn_tbl->kernel_context_switch_unlock)
dev->fn_tbl->kernel_context_switch_unlock(dev);
else
{
drm_lock_transfer( dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT );
else {
drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
if ( drm_lock_free( dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT ) ) {
DRM_ERROR( "\n" );
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
@ -190,8 +185,10 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
do {
old = *lock;
if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
else new = context | _DRM_LOCK_HELD;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
new = context | _DRM_LOCK_HELD;
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCKING_CONTEXT(old) == context) {
@ -204,7 +201,7 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
}
}
if (new == (context | _DRM_LOCK_HELD)) {
/* Have lock */
/* Have lock */
return 1;
}
return 0;
@ -222,15 +219,15 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
int drm_lock_transfer(drm_device_t *dev,
__volatile__ unsigned int *lock, unsigned int context)
int drm_lock_transfer(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old, new, prev;
dev->lock.filp = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
old = *lock;
new = context | _DRM_LOCK_HELD;
prev = cmpxchg(lock, old, new);
} while (prev != old);
return 1;
@ -247,21 +244,20 @@ int drm_lock_transfer(drm_device_t *dev,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
int drm_lock_free(drm_device_t *dev,
__volatile__ unsigned int *lock, unsigned int context)
int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old, new, prev;
dev->lock.filp = NULL;
do {
old = *lock;
new = 0;
old = *lock;
new = 0;
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context,
_DRM_LOCKING_CONTEXT(old));
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
wake_up_interruptible(&dev->lock.lock_queue);
@ -281,19 +277,19 @@ int drm_lock_free(drm_device_t *dev,
*/
int drm_notifier(void *priv)
{
drm_sigdata_t *s = (drm_sigdata_t *)priv;
unsigned int old, new, prev;
drm_sigdata_t *s = (drm_sigdata_t *) priv;
unsigned int old, new, prev;
/* Allow signal delivery if lock isn't held */
/* Allow signal delivery if lock isn't held */
if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
|| _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1;
|| _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
return 1;
/* Otherwise, set flag to force call to
drmUnlock */
/* Otherwise, set flag to force call to
drmUnlock */
do {
old = s->lock->lock;
new = old | _DRM_LOCK_CONT;
old = s->lock->lock;
new = old | _DRM_LOCK_CONT;
prev = cmpxchg(&s->lock->lock, old, new);
} while (prev != old);
return 0;


@ -60,7 +60,7 @@ void drm_mem_init(void)
* No-op.
*/
int drm_mem_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data)
int len, int *eof, void *data)
{
return 0;
}
@ -83,7 +83,8 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
{
void *pt;
if (!(pt = kmalloc(size, GFP_KERNEL))) return NULL;
if (!(pt = kmalloc(size, GFP_KERNEL)))
return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
kfree(oldpt);
@ -103,21 +104,20 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
unsigned long drm_alloc_pages(int order, int area)
{
unsigned long address;
unsigned long bytes = PAGE_SIZE << order;
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
unsigned int sz;
address = __get_free_pages(GFP_KERNEL, order);
if (!address)
return 0;
/* Zero */
/* Zero */
memset((void *)address, 0, bytes);
/* Reserve */
/* Reserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
}
@ -137,22 +137,20 @@ void drm_free_pages(unsigned long address, int order, int area)
{
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
unsigned int sz;
if (!address)
return;
/* Unreserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
free_pages(address, order);
}
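drm_alloc_pages()/drm_free_pages() operate on a power-of-two block: bytes = PAGE_SIZE << order, walked one page at a time for the reserve/unreserve calls. A trivial userspace illustration of that arithmetic, with a 4 KiB page size assumed and a dummy base address:

#include <stdio.h>

#define PAGE_SIZE 4096ul                /* assumed page size for the example */

int main(void)
{
        int order = 2;                                  /* 2^order pages */
        unsigned long bytes = PAGE_SIZE << order;       /* 4 pages = 16384 bytes */
        unsigned long addr, base = 0x100000ul;          /* pretend allocation */
        unsigned long sz;
        int npages = 0;

        /* Walk the region one page at a time, as the reserve/unreserve
         * loops above do with SetPageReserved()/ClearPageReserved(). */
        for (addr = base, sz = bytes; sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE)
                npages++;

        printf("order %d -> %lu bytes, %d pages\n", order, bytes, npages);
        return 0;
}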
#if __OS_HAS_AGP
/** Wrapper around agp_allocate_memory() */
DRM_AGP_MEM *drm_alloc_agp(int pages, u32 type)
@ -161,21 +159,21 @@ DRM_AGP_MEM *drm_alloc_agp(int pages, u32 type)
}
/** Wrapper around agp_free_memory() */
int drm_free_agp(DRM_AGP_MEM *handle, int pages)
int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
return drm_agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM *handle)
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
#endif /* agp */
#endif /* debug_memory */
#endif /* agp */
#endif /* debug_memory */


@ -72,8 +72,8 @@
/*
* Find the drm_map that covers the range [offset, offset+size).
*/
static inline drm_map_t *
drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
static inline drm_map_t *drm_lookup_map(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
struct list_head *list;
drm_map_list_t *r_list;
@ -84,16 +84,18 @@ drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
map = r_list->map;
if (!map)
continue;
if (map->offset <= offset && (offset + size) <= (map->offset + map->size))
if (map->offset <= offset
&& (offset + size) <= (map->offset + map->size))
return map;
}
return NULL;
}
static inline void *
agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
static inline void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
unsigned long *phys_addr_map, i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
void *addr;
@ -106,7 +108,8 @@ agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size))
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
if (!agpmem)
return NULL;
@ -121,7 +124,8 @@ agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
if (!page_map)
return NULL;
phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
phys_addr_map =
agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
for (i = 0; i < num_pages; ++i)
page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
@ -130,35 +134,37 @@ agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
return addr;
}
static inline unsigned long
drm_follow_page (void *vaddr)
static inline unsigned long drm_follow_page(void *vaddr)
{
pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
pmd_t *pmd = pmd_offset(pgd, (unsigned long) vaddr);
pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr);
pte_t *ptep = pte_offset_kernel(pmd, (unsigned long)vaddr);
return pte_pfn(*ptep) << PAGE_SHIFT;
}
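drm_follow_page() returns pte_pfn(*ptep) << PAGE_SHIFT, i.e. the page-aligned physical address, and drm_ioremapfree() ORs the offset within the page back in. A userspace sketch of the mask arithmetic involved, assuming 4 KiB pages; the address is an arbitrary example:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed 4 KiB pages */
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long vaddr = 0xdeadb000ul + 0x123;     /* example address */
        unsigned long page_start = vaddr & PAGE_MASK;   /* page-aligned part */
        unsigned long offset_in_page = vaddr & ~PAGE_MASK;
        unsigned long pfn = page_start >> PAGE_SHIFT;   /* page frame number */

        /* drm_ioremapfree() rebuilds a full address the same way:
         *   drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK)          */
        unsigned long rebuilt = (pfn << PAGE_SHIFT) | offset_in_page;

        printf("vaddr=%#lx pfn=%#lx offset=%#lx rebuilt=%#lx\n",
               vaddr, pfn, offset_in_page, rebuilt);
        return 0;
}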
#else /* __OS_HAS_AGP */
#else /* __OS_HAS_AGP */
static inline drm_map_t *drm_lookup_map(unsigned long offset, unsigned long size, drm_device_t *dev)
static inline drm_map_t *drm_lookup_map(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
return NULL;
return NULL;
}
static inline void *agp_remap(unsigned long offset, unsigned long size, drm_device_t *dev)
static inline void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
return NULL;
return NULL;
}
static inline unsigned long drm_follow_page (void *vaddr)
static inline unsigned long drm_follow_page(void *vaddr)
{
return 0;
return 0;
}
#endif
static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
static inline void *drm_ioremap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
#if defined(VMAP_4_ARGS)
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
@ -172,8 +178,8 @@ static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_de
return ioremap(offset, size);
}
static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
drm_device_t *dev)
static inline void *drm_ioremap_nocache(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
#if defined(VMAP_4_ARGS)
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
@ -187,7 +193,8 @@ static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size
return ioremap_nocache(offset, size);
}
static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
static inline void drm_ioremapfree(void *pt, unsigned long size,
drm_device_t * dev)
{
#if defined(VMAP_4_ARGS)
/*
@ -196,12 +203,12 @@ static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *d
* a future revision of the interface...
*/
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
&& ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END))
{
&& ((unsigned long)pt >= VMALLOC_START
&& (unsigned long)pt < VMALLOC_END)) {
unsigned long offset;
drm_map_t *map;
offset = drm_follow_page(pt) | ((unsigned long) pt & ~PAGE_MASK);
offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP) {
vunmap(pt);
@ -209,6 +216,5 @@ static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *d
}
}
#endif
iounmap(pt);
}


@ -35,75 +35,75 @@
#include "drmP.h"
typedef struct drm_mem_stats {
const char *name;
int succeed_count;
int free_count;
int fail_count;
unsigned long bytes_allocated;
unsigned long bytes_freed;
const char *name;
int succeed_count;
int free_count;
int fail_count;
unsigned long bytes_allocated;
unsigned long bytes_freed;
} drm_mem_stats_t;
static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
static unsigned long drm_ram_available = 0; /* In pages */
static unsigned long drm_ram_used = 0;
static drm_mem_stats_t drm_mem_stats[] = {
[DRM_MEM_DMA] = { "dmabufs" },
[DRM_MEM_SAREA] = { "sareas" },
[DRM_MEM_DRIVER] = { "driver" },
[DRM_MEM_MAGIC] = { "magic" },
[DRM_MEM_IOCTLS] = { "ioctltab" },
[DRM_MEM_MAPS] = { "maplist" },
[DRM_MEM_VMAS] = { "vmalist" },
[DRM_MEM_BUFS] = { "buflist" },
[DRM_MEM_SEGS] = { "seglist" },
[DRM_MEM_PAGES] = { "pagelist" },
[DRM_MEM_FILES] = { "files" },
[DRM_MEM_QUEUES] = { "queues" },
[DRM_MEM_CMDS] = { "commands" },
[DRM_MEM_MAPPINGS] = { "mappings" },
[DRM_MEM_BUFLISTS] = { "buflists" },
[DRM_MEM_AGPLISTS] = { "agplist" },
[DRM_MEM_SGLISTS] = { "sglist" },
[DRM_MEM_TOTALAGP] = { "totalagp" },
[DRM_MEM_BOUNDAGP] = { "boundagp" },
[DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
[DRM_MEM_CTXLIST] = { "ctxlist" },
[DRM_MEM_STUB] = { "stub" },
{ NULL, 0, } /* Last entry must be null */
static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
static unsigned long drm_ram_available = 0; /* In pages */
static unsigned long drm_ram_used = 0;
static drm_mem_stats_t drm_mem_stats[] = {
[DRM_MEM_DMA] = {"dmabufs"},
[DRM_MEM_SAREA] = {"sareas"},
[DRM_MEM_DRIVER] = {"driver"},
[DRM_MEM_MAGIC] = {"magic"},
[DRM_MEM_IOCTLS] = {"ioctltab"},
[DRM_MEM_MAPS] = {"maplist"},
[DRM_MEM_VMAS] = {"vmalist"},
[DRM_MEM_BUFS] = {"buflist"},
[DRM_MEM_SEGS] = {"seglist"},
[DRM_MEM_PAGES] = {"pagelist"},
[DRM_MEM_FILES] = {"files"},
[DRM_MEM_QUEUES] = {"queues"},
[DRM_MEM_CMDS] = {"commands"},
[DRM_MEM_MAPPINGS] = {"mappings"},
[DRM_MEM_BUFLISTS] = {"buflists"},
[DRM_MEM_AGPLISTS] = {"agplist"},
[DRM_MEM_SGLISTS] = {"sglist"},
[DRM_MEM_TOTALAGP] = {"totalagp"},
[DRM_MEM_BOUNDAGP] = {"boundagp"},
[DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
[DRM_MEM_CTXLIST] = {"ctxlist"},
[DRM_MEM_STUB] = {"stub"},
{NULL, 0,} /* Last entry must be null */
};
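The drm_mem_stats[] table uses designated initializers indexed by memory area plus a NULL-name sentinel, and the allocators bump its counters under drm_mem_lock. A reduced userspace sketch of the same bookkeeping, with a pthread mutex in place of the spinlock and a few invented areas (the fail_count field is omitted for brevity):

#include <pthread.h>
#include <stdio.h>

enum { MEM_DMA, MEM_DRIVER, MEM_FILES, MEM_NR_AREAS };

struct mem_stats {
        const char *name;
        int succeed_count;
        int free_count;
        unsigned long bytes_allocated;
        unsigned long bytes_freed;
};

static pthread_mutex_t mem_lock = PTHREAD_MUTEX_INITIALIZER;

/* Designated initializers keyed by area, with a NULL-name sentinel at the
 * end, just like the drm_mem_stats[] table above. */
static struct mem_stats mem_stats[] = {
        [MEM_DMA]      = { "dmabufs" },
        [MEM_DRIVER]   = { "driver" },
        [MEM_FILES]    = { "files" },
        [MEM_NR_AREAS] = { NULL },
};

static void account_alloc(int area, unsigned long bytes)
{
        pthread_mutex_lock(&mem_lock);
        mem_stats[area].succeed_count++;
        mem_stats[area].bytes_allocated += bytes;
        pthread_mutex_unlock(&mem_lock);
}

static void account_free(int area, unsigned long bytes)
{
        pthread_mutex_lock(&mem_lock);
        mem_stats[area].free_count++;
        mem_stats[area].bytes_freed += bytes;
        pthread_mutex_unlock(&mem_lock);
}

int main(void)
{
        account_alloc(MEM_FILES, 256);
        account_free(MEM_FILES, 256);
        for (struct mem_stats *m = mem_stats; m->name; ++m)
                printf("%-8s allocs=%d frees=%d bytes=%lu/%lu\n", m->name,
                       m->succeed_count, m->free_count,
                       m->bytes_allocated, m->bytes_freed);
        return 0;
}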
void drm_mem_init(void)
{
drm_mem_stats_t *mem;
struct sysinfo si;
struct sysinfo si;
for (mem = drm_mem_stats; mem->name; ++mem) {
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
mem->bytes_allocated = 0;
mem->bytes_freed = 0;
mem->bytes_freed = 0;
}
si_meminfo(&si);
drm_ram_available = si.totalram;
drm_ram_used = 0;
drm_ram_used = 0;
}
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
static int drm__mem_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
int request, int *eof, void *data)
{
drm_mem_stats_t *pt;
int len = 0;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*eof = 0;
*eof = 0;
*start = &buf[offset];
DRM_PROC_PRINT(" total counts "
@ -129,13 +129,14 @@ static int drm__mem_info(char *buf, char **start, off_t offset,
- (long)pt->bytes_freed);
}
if (len > request + offset) return request;
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
int drm_mem_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data)
int len, int *eof, void *data)
{
int ret;
@ -182,7 +183,8 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
{
void *pt;
if (!(pt = drm_alloc(size, area))) return NULL;
if (!(pt = drm_alloc(size, area)))
return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
drm_free(oldpt, oldsize, area);
@ -195,12 +197,14 @@ void drm_free(void *pt, size_t size, int area)
int alloc_count;
int free_count;
if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else kfree(pt);
if (!pt)
DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else
kfree(pt);
spin_lock(&drm_mem_lock);
drm_mem_stats[area].bytes_freed += size;
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
@ -211,9 +215,9 @@ void drm_free(void *pt, size_t size, int area)
unsigned long drm_alloc_pages(int order, int area)
{
unsigned long address;
unsigned long bytes = PAGE_SIZE << order;
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
unsigned int sz;
spin_lock(&drm_mem_lock);
if ((drm_ram_used >> PAGE_SHIFT)
@ -233,17 +237,15 @@ unsigned long drm_alloc_pages(int order, int area)
spin_lock(&drm_mem_lock);
++drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_allocated += bytes;
drm_ram_used += bytes;
drm_ram_used += bytes;
spin_unlock(&drm_mem_lock);
/* Zero outside the lock */
/* Zero outside the lock */
memset((void *)address, 0, bytes);
/* Reserve */
/* Reserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
}
@ -253,28 +255,27 @@ unsigned long drm_alloc_pages(int order, int area)
void drm_free_pages(unsigned long address, int order, int area)
{
unsigned long bytes = PAGE_SIZE << order;
int alloc_count;
int free_count;
int alloc_count;
int free_count;
unsigned long addr;
unsigned int sz;
unsigned int sz;
if (!address) {
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
} else {
/* Unreserve */
/* Unreserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
free_pages(address, order);
}
spin_lock(&drm_mem_lock);
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_freed += bytes;
drm_ram_used -= bytes;
drm_ram_used -= bytes;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
@ -283,7 +284,7 @@ void drm_free_pages(unsigned long address, int order, int area)
}
}
void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t * dev)
{
void *pt;
@ -306,7 +307,8 @@ void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
return pt;
}
void *drm_ioremap_nocache(unsigned long offset, unsigned long size, drm_device_t *dev)
void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
void *pt;
@ -329,7 +331,7 @@ void *drm_ioremap_nocache(unsigned long offset, unsigned long size, drm_device_t
return pt;
}
void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev)
{
int alloc_count;
int free_count;
@ -342,8 +344,8 @@ void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
spin_lock(&drm_mem_lock);
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
@ -367,7 +369,7 @@ DRM_AGP_MEM *drm_alloc_agp(int pages, u32 type)
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+= pages << PAGE_SHIFT;
+= pages << PAGE_SHIFT;
spin_unlock(&drm_mem_lock);
return handle;
}
@ -377,11 +379,11 @@ DRM_AGP_MEM *drm_alloc_agp(int pages, u32 type)
return NULL;
}
int drm_free_agp(DRM_AGP_MEM *handle, int pages)
int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
int alloc_count;
int free_count;
int retval = -EINVAL;
int alloc_count;
int free_count;
int retval = -EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
@ -391,10 +393,10 @@ int drm_free_agp(DRM_AGP_MEM *handle, int pages)
if (drm_agp_free_memory(handle)) {
spin_lock(&drm_mem_lock);
free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+= pages << PAGE_SHIFT;
+= pages << PAGE_SHIFT;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
@ -406,7 +408,7 @@ int drm_free_agp(DRM_AGP_MEM *handle, int pages)
return retval;
}
int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
int retcode = -EINVAL;
@ -420,7 +422,7 @@ int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+= handle->page_count << PAGE_SHIFT;
+= handle->page_count << PAGE_SHIFT;
spin_unlock(&drm_mem_lock);
return retcode;
}
@ -430,7 +432,7 @@ int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
return retcode;
}
int drm_unbind_agp(DRM_AGP_MEM *handle)
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
int alloc_count;
int free_count;
@ -442,12 +444,13 @@ int drm_unbind_agp(DRM_AGP_MEM *handle)
return retcode;
}
if ((retcode = drm_agp_unbind_memory(handle))) return retcode;
if ((retcode = drm_agp_unbind_memory(handle)))
return retcode;
spin_lock(&drm_mem_lock);
free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
+= handle->page_count << PAGE_SHIFT;
+= handle->page_count << PAGE_SHIFT;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
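For context, the alloc/free bookkeeping that drm_free() and drm_free_pages() update in this file follows one pattern: bump the counters under the stats spinlock, then warn when frees outrun successful allocations. A minimal sketch of that pattern, with hypothetical names (stats_lock, account_free) and assuming 2.6-era kernel headers, not code from this commit:

#include <linux/kernel.h>
#include <linux/spinlock.h>

struct mem_stats {
	int succeed_count;		/* successful allocations */
	int free_count;			/* frees seen so far */
	unsigned long bytes_allocated;
	unsigned long bytes_freed;
};

static spinlock_t stats_lock = SPIN_LOCK_UNLOCKED;
static struct mem_stats stats;

static void account_free(size_t size)
{
	int alloc_count, free_count;

	spin_lock(&stats_lock);
	stats.bytes_freed += size;
	free_count = ++stats.free_count;
	alloc_count = stats.succeed_count;
	spin_unlock(&stats_lock);

	/* More frees than successful allocations means a double free
	 * or a free of memory this accounting never saw. */
	if (free_count > alloc_count)
		printk(KERN_ERR "excess frees: %d frees, %d allocs\n",
		       free_count, alloc_count);
}

Reading the snapshot back outside the lock is acceptable here because the counters only feed a diagnostic message.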

View File

@ -35,75 +35,74 @@
#include "drmP.h"
typedef struct drm_mem_stats {
const char *name;
int succeed_count;
int free_count;
int fail_count;
unsigned long bytes_allocated;
unsigned long bytes_freed;
const char *name;
int succeed_count;
int free_count;
int fail_count;
unsigned long bytes_allocated;
unsigned long bytes_freed;
} drm_mem_stats_t;
static spinlock_t DRM(mem_lock) = SPIN_LOCK_UNLOCKED;
static unsigned long DRM(ram_available) = 0; /* In pages */
static unsigned long DRM(ram_used) = 0;
static drm_mem_stats_t DRM(mem_stats)[] = {
[DRM_MEM_DMA] = { "dmabufs" },
[DRM_MEM_SAREA] = { "sareas" },
[DRM_MEM_DRIVER] = { "driver" },
[DRM_MEM_MAGIC] = { "magic" },
[DRM_MEM_IOCTLS] = { "ioctltab" },
[DRM_MEM_MAPS] = { "maplist" },
[DRM_MEM_VMAS] = { "vmalist" },
[DRM_MEM_BUFS] = { "buflist" },
[DRM_MEM_SEGS] = { "seglist" },
[DRM_MEM_PAGES] = { "pagelist" },
[DRM_MEM_FILES] = { "files" },
[DRM_MEM_QUEUES] = { "queues" },
[DRM_MEM_CMDS] = { "commands" },
[DRM_MEM_MAPPINGS] = { "mappings" },
[DRM_MEM_BUFLISTS] = { "buflists" },
[DRM_MEM_AGPLISTS] = { "agplist" },
[DRM_MEM_SGLISTS] = { "sglist" },
[DRM_MEM_TOTALAGP] = { "totalagp" },
[DRM_MEM_BOUNDAGP] = { "boundagp" },
[DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
[DRM_MEM_CTXLIST] = { "ctxlist" },
[DRM_MEM_STUB] = { "stub" },
{ NULL, 0, } /* Last entry must be null */
static spinlock_t DRM(mem_lock) = SPIN_LOCK_UNLOCKED;
static unsigned long DRM(ram_available) = 0; /* In pages */
static unsigned long DRM(ram_used) = 0;
static drm_mem_stats_t DRM(mem_stats)[] =
{
[DRM_MEM_DMA] = {"dmabufs"},
[DRM_MEM_SAREA] = {"sareas"},
[DRM_MEM_DRIVER] = {"driver"},
[DRM_MEM_MAGIC] = {"magic"},
[DRM_MEM_IOCTLS] = {"ioctltab"},
[DRM_MEM_MAPS] = {"maplist"},
[DRM_MEM_VMAS] = {"vmalist"},
[DRM_MEM_BUFS] = {"buflist"},
[DRM_MEM_SEGS] = {"seglist"},
[DRM_MEM_PAGES] = {"pagelist"},
[DRM_MEM_FILES] = {"files"},
[DRM_MEM_QUEUES] = {"queues"},
[DRM_MEM_CMDS] = {"commands"},
[DRM_MEM_MAPPINGS] = {"mappings"},
[DRM_MEM_BUFLISTS] = {"buflists"},
[DRM_MEM_AGPLISTS] = {"agplist"},
[DRM_MEM_SGLISTS] = {"sglist"},
[DRM_MEM_TOTALAGP] = {"totalagp"},
[DRM_MEM_BOUNDAGP] = {"boundagp"},
[DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
[DRM_MEM_CTXLIST] = {"ctxlist"},
[DRM_MEM_STUB] = {"stub"},
{NULL, 0,} /* Last entry must be null */
};
void DRM(mem_init)(void)
{
void DRM(mem_init) (void) {
drm_mem_stats_t *mem;
struct sysinfo si;
struct sysinfo si;
for (mem = DRM(mem_stats); mem->name; ++mem) {
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
mem->bytes_allocated = 0;
mem->bytes_freed = 0;
mem->bytes_freed = 0;
}
si_meminfo(&si);
DRM(ram_available) = si.totalram;
DRM(ram_used) = 0;
DRM(ram_used) = 0;
}
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
static int DRM(_mem_info)(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
static int DRM(_mem_info) (char *buf, char **start, off_t offset,
int request, int *eof, void *data) {
drm_mem_stats_t *pt;
int len = 0;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*eof = 0;
*eof = 0;
*start = &buf[offset];
DRM_PROC_PRINT(" total counts "
@ -129,24 +128,23 @@ static int DRM(_mem_info)(char *buf, char **start, off_t offset,
- (long)pt->bytes_freed);
}
if (len > request + offset) return request;
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
int DRM(mem_info)(char *buf, char **start, off_t offset,
int len, int *eof, void *data)
{
int DRM(mem_info) (char *buf, char **start, off_t offset,
int len, int *eof, void *data) {
int ret;
spin_lock(&DRM(mem_lock));
ret = DRM(_mem_info)(buf, start, offset, len, eof, data);
ret = DRM(_mem_info) (buf, start, offset, len, eof, data);
spin_unlock(&DRM(mem_lock));
return ret;
}
void *DRM(alloc)(size_t size, int area)
{
void *DRM(alloc) (size_t size, int area) {
void *pt;
if (!size) {
@ -167,40 +165,40 @@ void *DRM(alloc)(size_t size, int area)
return pt;
}
void *DRM(calloc)(size_t nmemb, size_t size, int area)
{
void *DRM(calloc) (size_t nmemb, size_t size, int area) {
void *addr;
addr = DRM(alloc)(nmemb * size, area);
addr = DRM(alloc) (nmemb * size, area);
if (addr != NULL)
memset((void *)addr, 0, size * nmemb);
return addr;
}
void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
{
void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) {
void *pt;
if (!(pt = DRM(alloc)(size, area))) return NULL;
if (!(pt = DRM(alloc) (size, area)))
return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
DRM(free)(oldpt, oldsize, area);
DRM(free) (oldpt, oldsize, area);
}
return pt;
}
void DRM(free)(void *pt, size_t size, int area)
{
void DRM(free) (void *pt, size_t size, int area) {
int alloc_count;
int free_count;
if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else kfree(pt);
if (!pt)
DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else
kfree(pt);
spin_lock(&DRM(mem_lock));
DRM(mem_stats)[area].bytes_freed += size;
free_count = ++DRM(mem_stats)[area].free_count;
alloc_count = DRM(mem_stats)[area].succeed_count;
free_count = ++DRM(mem_stats)[area].free_count;
alloc_count = DRM(mem_stats)[area].succeed_count;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
@ -208,12 +206,11 @@ void DRM(free)(void *pt, size_t size, int area)
}
}
unsigned long DRM(alloc_pages)(int order, int area)
{
unsigned long DRM(alloc_pages) (int order, int area) {
unsigned long address;
unsigned long bytes = PAGE_SIZE << order;
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
unsigned int sz;
spin_lock(&DRM(mem_lock));
if ((DRM(ram_used) >> PAGE_SHIFT)
@ -233,48 +230,44 @@ unsigned long DRM(alloc_pages)(int order, int area)
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].succeed_count;
DRM(mem_stats)[area].bytes_allocated += bytes;
DRM(ram_used) += bytes;
DRM(ram_used) += bytes;
spin_unlock(&DRM(mem_lock));
/* Zero outside the lock */
/* Zero outside the lock */
memset((void *)address, 0, bytes);
/* Reserve */
/* Reserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
}
return address;
}
void DRM(free_pages)(unsigned long address, int order, int area)
{
void DRM(free_pages) (unsigned long address, int order, int area) {
unsigned long bytes = PAGE_SIZE << order;
int alloc_count;
int free_count;
int alloc_count;
int free_count;
unsigned long addr;
unsigned int sz;
unsigned int sz;
if (!address) {
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
} else {
/* Unreserve */
/* Unreserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
free_pages(address, order);
}
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[area].free_count;
alloc_count = DRM(mem_stats)[area].succeed_count;
free_count = ++DRM(mem_stats)[area].free_count;
alloc_count = DRM(mem_stats)[area].succeed_count;
DRM(mem_stats)[area].bytes_freed += bytes;
DRM(ram_used) -= bytes;
DRM(ram_used) -= bytes;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
@ -283,8 +276,8 @@ void DRM(free_pages)(unsigned long address, int order, int area)
}
}
void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
void *DRM(ioremap) (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
if (!size) {
@ -306,8 +299,8 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
return pt;
}
void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
if (!size) {
@ -329,8 +322,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_
return pt;
}
void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
{
void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
int alloc_count;
int free_count;
@ -342,8 +334,8 @@ void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
spin_lock(&DRM(mem_lock));
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
@ -354,8 +346,7 @@ void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
#if __OS_HAS_AGP
DRM_AGP_MEM *DRM(alloc_agp)(int pages, u32 type)
{
DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
DRM_AGP_MEM *handle;
if (!pages) {
@ -363,11 +354,11 @@ DRM_AGP_MEM *DRM(alloc_agp)(int pages, u32 type)
return NULL;
}
if ((handle = DRM(agp_allocate_memory)(pages, type))) {
if ((handle = DRM(agp_allocate_memory) (pages, type))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+= pages << PAGE_SHIFT;
+= pages << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
return handle;
}
@ -377,11 +368,10 @@ DRM_AGP_MEM *DRM(alloc_agp)(int pages, u32 type)
return NULL;
}
int DRM(free_agp)(DRM_AGP_MEM *handle, int pages)
{
int alloc_count;
int free_count;
int retval = -EINVAL;
int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
int alloc_count;
int free_count;
int retval = -EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
@ -389,12 +379,12 @@ int DRM(free_agp)(DRM_AGP_MEM *handle, int pages)
return retval;
}
if (DRM(agp_free_memory)(handle)) {
if (DRM(agp_free_memory) (handle)) {
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+= pages << PAGE_SHIFT;
+= pages << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
@ -406,8 +396,7 @@ int DRM(free_agp)(DRM_AGP_MEM *handle, int pages)
return retval;
}
int DRM(bind_agp)(DRM_AGP_MEM *handle, unsigned int start)
{
int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
int retcode = -EINVAL;
if (!handle) {
@ -416,11 +405,11 @@ int DRM(bind_agp)(DRM_AGP_MEM *handle, unsigned int start)
return retcode;
}
if (!(retcode = DRM(agp_bind_memory)(handle, start))) {
if (!(retcode = DRM(agp_bind_memory) (handle, start))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+= handle->page_count << PAGE_SHIFT;
+= handle->page_count << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
return retcode;
}
@ -430,8 +419,7 @@ int DRM(bind_agp)(DRM_AGP_MEM *handle, unsigned int start)
return retcode;
}
int DRM(unbind_agp)(DRM_AGP_MEM *handle)
{
int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
int alloc_count;
int free_count;
int retcode = -EINVAL;
@ -442,12 +430,13 @@ int DRM(unbind_agp)(DRM_AGP_MEM *handle)
return retcode;
}
if ((retcode = DRM(agp_unbind_memory)(handle))) return retcode;
if ((retcode = DRM(agp_unbind_memory) (handle)))
return retcode;
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+= handle->page_count << PAGE_SHIFT;
+= handle->page_count << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
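This second copy of the memory code uses the DRM() name-mangling macro instead of plain drm_ prefixes. As a rough illustration of the old templated build (an assumption about how drivers consumed it, not something this commit changes), each driver defined DRM() to stamp its own prefix onto the shared symbols before including the common sources:

/* Hypothetical driver prefix; real drivers used their own names. */
#define DRM(x) mydrv_##x

/* With that definition, the shared declaration ... */
void *DRM(alloc)(size_t size, int area);
/* ... preprocesses to:  void *mydrv_alloc(size_t size, int area); */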

View File

@ -3,7 +3,6 @@
* OS abstraction macros.
*/
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
@ -15,7 +14,7 @@
/** Current process ID */
#define DRM_CURRENTPID current->pid
#define DRM_UDELAY(d) udelay(d)
#if LINUX_VERSION_CODE <= 0x020608 /* KERNEL_VERSION(2,6,8) */
#if LINUX_VERSION_CODE <= 0x020608 /* KERNEL_VERSION(2,6,8) */
/** Read a byte from a MMIO region */
#define DRM_READ8(map, offset) readb(((unsigned long)(map)->handle) + (offset))
/** Read a word from a MMIO region */
@ -57,8 +56,8 @@
/** backwards compatibility with old irq return values */
#ifndef IRQ_HANDLED
typedef void irqreturn_t;
#define IRQ_HANDLED /* nothing */
#define IRQ_NONE /* nothing */
#define IRQ_HANDLED /* nothing */
#define IRQ_NONE /* nothing */
#endif
/** AGP types */
@ -81,17 +80,17 @@ struct no_agp_kern {
#endif
#if !(__OS_HAS_MTRR)
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment)
static __inline__ int mtrr_add(unsigned long base, unsigned long size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_del (int reg, unsigned long base,
unsigned long size)
static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
{
return -ENODEV;
}
#define MTRR_TYPE_WRCOMB 1
#endif
@ -124,7 +123,6 @@ static __inline__ int mtrr_del (int reg, unsigned long base,
#define DRM_PUT_USER_UNCHECKED(uaddr, val) \
__put_user(val, uaddr)
#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
/**
@ -171,7 +169,5 @@ do { \
remove_wait_queue(&(queue), &entry); \
} while (0)
#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
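One pattern worth noting in this header: when MTRR support is compiled out, mtrr_add() and mtrr_del() are replaced by inline stubs that simply return -ENODEV, so callers never need an #ifdef. A generic sketch of the same idea, using made-up names (MY_HAS_FEATURE, my_feature_enable):

#include <linux/errno.h>

#if MY_HAS_FEATURE
extern int my_feature_enable(unsigned long base, unsigned long size);
#else
/* Stub with the same signature: callers compile unchanged and simply
 * see "not available" at run time. */
static __inline__ int my_feature_enable(unsigned long base,
					unsigned long size)
{
	return -ENODEV;
}
#endif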

View File

@ -9,7 +9,7 @@
* \todo Add support to map these buffers.
* \todo The wrappers here are so thin that they would be better off inlined..
*
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
@ -37,7 +37,6 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/pci.h>
#include "drmP.h"
@ -45,13 +44,11 @@
/** \name PCI memory */
/*@{*/
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
void *
drm_pci_alloc(drm_device_t *dev, size_t size, size_t align,
dma_addr_t maxaddr, dma_addr_t *busaddr)
void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
dma_addr_t maxaddr, dma_addr_t * busaddr)
{
void *address;
#if 0
@ -77,12 +74,12 @@ drm_pci_alloc(drm_device_t *dev, size_t size, size_t align,
if (align > size)
return NULL;
if (pci_set_dma_mask( dev->pdev, maxaddr ) != 0) {
DRM_ERROR( "Setting pci dma mask failed\n" );
if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
DRM_ERROR("Setting pci dma mask failed\n");
return NULL;
}
address = pci_alloc_consistent( dev->pdev, size, busaddr );
address = pci_alloc_consistent(dev->pdev, size, busaddr);
#if DRM_DEBUG_MEMORY
if (address == NULL) {
@ -95,7 +92,7 @@ drm_pci_alloc(drm_device_t *dev, size_t size, size_t align,
spin_lock(&drm_mem_lock);
++drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_allocated += size;
drm_ram_used += size;
drm_ram_used += size;
spin_unlock(&drm_mem_lock);
#else
if (address == NULL)
@ -106,10 +103,9 @@ drm_pci_alloc(drm_device_t *dev, size_t size, size_t align,
#if 0
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Reserve */
/* Reserve */
for (addr = (unsigned long)address, sz = size;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
}
#endif
@ -122,7 +118,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
* \brief Free a PCI consistent memory block.
*/
void
drm_pci_free(drm_device_t *dev, size_t size, void *vaddr, dma_addr_t busaddr)
drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
{
#if 0
unsigned long addr;
@ -141,22 +137,21 @@ drm_pci_free(drm_device_t *dev, size_t size, void *vaddr, dma_addr_t busaddr)
} else {
#if 0
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
/* Unreserve */
for (addr = (unsigned long)vaddr, sz = size;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
#endif
pci_free_consistent( dev->pdev, size, vaddr, busaddr );
pci_free_consistent(dev->pdev, size, vaddr, busaddr);
}
#if DRM_DEBUG_MEMORY
spin_lock(&drm_mem_lock);
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
free_count = ++drm_mem_stats[area].free_count;
alloc_count = drm_mem_stats[area].succeed_count;
drm_mem_stats[area].bytes_freed += size;
drm_ram_used -= size;
drm_ram_used -= size;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
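For reference, the 2.6-era PCI DMA calls that drm_pci_alloc() and drm_pci_free() wrap in this file look like this in isolation. A simplified sketch with hypothetical wrapper names (alloc_dma_block, free_dma_block); the DRM_DEBUG_MEMORY accounting and page reservation are omitted:

#include <linux/pci.h>

static void *alloc_dma_block(struct pci_dev *pdev, size_t size,
			     dma_addr_t maxaddr, dma_addr_t *busaddr)
{
	/* Constrain the bus addresses the device may be handed. */
	if (pci_set_dma_mask(pdev, maxaddr) != 0)
		return NULL;

	/* Returns a kernel virtual address; *busaddr receives the
	 * matching bus address for programming the device. */
	return pci_alloc_consistent(pdev, size, busaddr);
}

static void free_dma_block(struct pci_dev *pdev, size_t size,
			   void *vaddr, dma_addr_t busaddr)
{
	pci_free_consistent(pdev, size, vaddr, busaddr);
}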

View File

@ -39,19 +39,19 @@
#include "drmP.h"
static int drm_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_vm_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_vm_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#endif
/**
@ -59,18 +59,19 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
*/
struct drm_proc_list {
const char *name; /**< file name */
int (*f)(char *, char **, off_t, int, int *, void *); /**< proc callback*/
int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
} drm_proc_list[] = {
{ "name", drm_name_info },
{ "mem", drm_mem_info },
{ "vm", drm_vm_info },
{ "clients", drm_clients_info },
{ "queues", drm_queues_info },
{ "bufs", drm_bufs_info },
{"name", drm_name_info},
{"mem", drm_mem_info},
{"vm", drm_vm_info},
{"clients", drm_clients_info},
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
#if DRM_DEBUG_CODE
{ "vma", drm_vma_info },
{"vma", drm_vma_info},
#endif
};
#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
/**
@ -86,13 +87,12 @@ struct drm_proc_list {
* "/proc/dri/%minor%/", and each entry in proc_list as
* "/proc/dri/%minor%/%name%".
*/
int drm_proc_init(drm_device_t *dev, int minor,
struct proc_dir_entry *root,
struct proc_dir_entry **dev_root)
int drm_proc_init(drm_device_t * dev, int minor,
struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
{
struct proc_dir_entry *ent;
int i, j;
char name[64];
int i, j;
char name[64];
sprintf(name, "%d", minor);
*dev_root = create_proc_entry(name, S_IFDIR, root);
@ -103,7 +103,7 @@ int drm_proc_init(drm_device_t *dev, int minor,
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
ent = create_proc_entry(drm_proc_list[i].name,
S_IFREG|S_IRUGO, *dev_root);
S_IFREG | S_IRUGO, *dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
name, drm_proc_list[i].name);
@ -114,12 +114,11 @@ int drm_proc_init(drm_device_t *dev, int minor,
return -1;
}
ent->read_proc = drm_proc_list[i].f;
ent->data = dev;
ent->data = dev;
}
return 0;
}
/**
* Cleanup the proc filesystem resources.
*
@ -131,12 +130,13 @@ int drm_proc_init(drm_device_t *dev, int minor,
* Remove all proc entries created by proc_init().
*/
int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
struct proc_dir_entry *dev_root)
struct proc_dir_entry *dev_root)
{
int i;
int i;
char name[64];
if (!root || !dev_root) return 0;
if (!root || !dev_root)
return 0;
for (i = 0; i < DRM_PROC_ENTRIES; i++)
remove_proc_entry(drm_proc_list[i].name, dev_root);
@ -160,10 +160,10 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
* Prints the device name together with the bus id if available.
*/
static int drm_name_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
drm_device_t *dev = (drm_device_t *) data;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@ -171,16 +171,19 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
}
*start = &buf[offset];
*eof = 0;
*eof = 0;
if (dev->unique) {
DRM_PROC_PRINT("%s 0x%lx %s\n",
dev->name, (long)old_encode_dev(dev->device), dev->unique);
dev->name, (long)old_encode_dev(dev->device),
dev->unique);
} else {
DRM_PROC_PRINT("%s 0x%lx\n", dev->name, (long)old_encode_dev(dev->device));
DRM_PROC_PRINT("%s 0x%lx\n", dev->name,
(long)old_encode_dev(dev->device));
}
if (len > request + offset) return request;
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
@ -199,20 +202,20 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
* Prints information about all mappings in drm_device::maplist.
*/
static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
drm_map_t *map;
drm_device_t *dev = (drm_device_t *) data;
int len = 0;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
_DRM_SCATTER_GATHER. */
const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
const char *type;
int i;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
_DRM_SCATTER_GATHER. */
const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
const char *type;
int i;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@ -220,33 +223,36 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
}
*start = &buf[offset];
*eof = 0;
*eof = 0;
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
if (dev->maplist != NULL)
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
map = r_list->map;
if(!map) continue;
if (map->type < 0 || map->type > 4) type = "??";
else type = types[map->type];
if (!map)
continue;
if (map->type < 0 || map->type > 4)
type = "??";
else
type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size,
type,
map->flags,
(unsigned long)map->handle);
type, map->flags, (unsigned long)map->handle);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
} else {
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
i++;
}
}
if (len > request + offset) return request;
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
@ -255,10 +261,10 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
* Simply calls _vm_info() while holding the drm_device::struct_sem lock.
*/
static int drm_vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
drm_device_t *dev = (drm_device_t *) data;
int ret;
down(&dev->struct_sem);
ret = drm__vm_info(buf, start, offset, request, eof, data);
@ -278,12 +284,12 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request,
* \return number of written bytes.
*/
static int drm__queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
int request, int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
int i;
drm_queue_t *q;
drm_device_t *dev = (drm_device_t *) data;
int len = 0;
int i;
drm_queue_t *q;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@ -291,7 +297,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
}
*start = &buf[offset];
*eof = 0;
*eof = 0;
DRM_PROC_PRINT(" ctx/flags use fin"
" blk/rw/rwf wait flushed queued"
@ -309,14 +315,17 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
atomic_read(&q->block_count),
atomic_read(&q->block_read) ? 'r' : '-',
atomic_read(&q->block_write) ? 'w' : '-',
waitqueue_active(&q->read_queue) ? 'r':'-',
waitqueue_active(&q->write_queue) ? 'w':'-',
waitqueue_active(&q->flush_queue) ? 'f':'-',
waitqueue_active(&q->read_queue) ? 'r' : '-',
waitqueue_active(&q->
write_queue) ? 'w' : '-',
waitqueue_active(&q->
flush_queue) ? 'f' : '-',
DRM_BUFCOUNT(&q->waitlist));
atomic_dec(&q->use_count);
}
if (len > request + offset) return request;
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
@ -325,10 +334,10 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
* Simply calls _queues_info() while holding the drm_device::struct_sem lock.
*/
static int drm_queues_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
drm_device_t *dev = (drm_device_t *) data;
int ret;
down(&dev->struct_sem);
ret = drm__queues_info(buf, start, offset, request, eof, data);
@ -348,12 +357,12 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request,
* \return number of written bytes.
*/
static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
drm_device_t *dev = (drm_device_t *) data;
int len = 0;
drm_device_dma_t *dma = dev->dma;
int i;
int i;
if (!dma || offset > DRM_PROC_LIMIT) {
*eof = 1;
@ -361,7 +370,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
}
*start = &buf[offset];
*eof = 0;
*eof = 0;
DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
for (i = 0; i <= DRM_MAX_ORDER; i++) {
@ -374,19 +383,21 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
.freelist.count),
dma->bufs[i].seg_count,
dma->bufs[i].seg_count
*(1 << dma->bufs[i].page_order),
* (1 << dma->bufs[i].page_order),
(dma->bufs[i].seg_count
* (1 << dma->bufs[i].page_order))
* PAGE_SIZE / 1024);
}
DRM_PROC_PRINT("\n");
for (i = 0; i < dma->buf_count; i++) {
if (i && !(i%32)) DRM_PROC_PRINT("\n");
if (i && !(i % 32))
DRM_PROC_PRINT("\n");
DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
}
DRM_PROC_PRINT("\n");
if (len > request + offset) return request;
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
@ -395,10 +406,10 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
* Simply calls _bufs_info() while holding the drm_device::struct_sem lock.
*/
static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
drm_device_t *dev = (drm_device_t *) data;
int ret;
down(&dev->struct_sem);
ret = drm__bufs_info(buf, start, offset, request, eof, data);
@ -418,11 +429,11 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
* \return number of written bytes.
*/
static int drm__clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
int request, int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
drm_file_t *priv;
drm_device_t *dev = (drm_device_t *) data;
int len = 0;
drm_file_t *priv;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@ -430,7 +441,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
}
*start = &buf[offset];
*eof = 0;
*eof = 0;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
for (priv = dev->file_first; priv; priv = priv->next) {
@ -438,12 +449,11 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
priv->authenticated ? 'y' : 'n',
priv->minor,
priv->pid,
priv->uid,
priv->magic,
priv->ioctl_count);
priv->uid, priv->magic, priv->ioctl_count);
}
if (len > request + offset) return request;
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
@ -452,10 +462,10 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
* Simply calls _clients_info() while holding the drm_device::struct_sem lock.
*/
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
int request, int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
drm_device_t *dev = (drm_device_t *) data;
int ret;
down(&dev->struct_sem);
ret = drm__clients_info(buf, start, offset, request, eof, data);
@ -466,14 +476,14 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
#if DRM_DEBUG_CODE
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
drm_vma_entry_t *pt;
drm_device_t *dev = (drm_device_t *) data;
int len = 0;
drm_vma_entry_t *pt;
struct vm_area_struct *vma;
#if defined(__i386__)
unsigned int pgprot;
unsigned int pgprot;
#endif
if (offset > DRM_PROC_LIMIT) {
@ -482,51 +492,53 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
}
*start = &buf[offset];
*eof = 0;
*eof = 0;
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
for (pt = dev->vmalist; pt; pt = pt->next) {
if (!(vma = pt->vma)) continue;
if (!(vma = pt->vma))
continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
pt->pid,
vma->vm_start,
vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
VM_OFFSET(vma));
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_PSE ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_PSE ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
DRM_PROC_PRINT("\n");
}
if (len > request + offset) return request;
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int drm_vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
drm_device_t *dev = (drm_device_t *) data;
int ret;
down(&dev->struct_sem);
ret = drm__vma_info(buf, start, offset, request, eof, data);
@ -534,5 +546,3 @@ static int drm_vma_info(char *buf, char **start, off_t offset, int request,
return ret;
}
#endif
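All of the *_info() handlers in this file follow the same 2.6 read_proc contract: write into buf, point *start at the requested offset, return the requested byte count while more data remains, and set *eof once everything fit. A stripped-down sketch of that skeleton; render_stats() is a hypothetical helper that appends text and returns the new length:

static int example_info(char *buf, char **start, off_t offset,
			int request, int *eof, void *data)
{
	int len = 0;

	if (offset > DRM_PROC_LIMIT) {	/* refuse absurd offsets */
		*eof = 1;
		return 0;
	}

	*start = &buf[offset];	/* the reader resumes from here */
	*eof = 0;

	len = render_stats(buf, len, data);	/* hypothetical: fill buf */

	if (len > request + offset)	/* more left: hand back a full chunk */
		return request;
	*eof = 1;			/* everything fit */
	return len - offset;
}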

View File

@ -33,36 +33,33 @@
#include <linux/config.h>
#include <linux/vmalloc.h>
#include "drmP.h"
#define DEBUG_SCATTER 0
void drm_sg_cleanup( drm_sg_mem_t *entry )
void drm_sg_cleanup(drm_sg_mem_t * entry)
{
struct page *page;
int i;
for ( i = 0 ; i < entry->pages ; i++ ) {
for (i = 0; i < entry->pages; i++) {
page = entry->pagelist[i];
if ( page )
ClearPageReserved( page );
if (page)
ClearPageReserved(page);
}
vfree( entry->virtual );
vfree(entry->virtual);
drm_free( entry->busaddr,
entry->pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
drm_free( entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES );
drm_free( entry,
sizeof(*entry),
DRM_MEM_SGLISTS );
drm_free(entry->busaddr,
entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
drm_free(entry->pagelist,
entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
}
int drm_sg_alloc( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_sg_alloc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
@ -71,74 +68,69 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
drm_sg_mem_t *entry;
unsigned long pages, i, j;
DRM_DEBUG( "%s\n", __FUNCTION__ );
DRM_DEBUG("%s\n", __FUNCTION__);
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
if ( dev->sg )
if (dev->sg)
return -EINVAL;
if ( copy_from_user( &request, argp, sizeof(request) ) )
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
entry = drm_alloc( sizeof(*entry), DRM_MEM_SGLISTS );
if ( !entry )
entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
if (!entry)
return -ENOMEM;
memset( entry, 0, sizeof(*entry) );
memset(entry, 0, sizeof(*entry));
pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG( "sg size=%ld pages=%ld\n", request.size, pages );
DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages);
entry->pages = pages;
entry->pagelist = drm_alloc( pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES );
if ( !entry->pagelist ) {
drm_free( entry, sizeof(*entry), DRM_MEM_SGLISTS );
entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES);
if (!entry->pagelist) {
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
return -ENOMEM;
}
memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
entry->busaddr = drm_alloc( pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
if ( !entry->busaddr ) {
drm_free( entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES );
drm_free( entry,
sizeof(*entry),
DRM_MEM_SGLISTS );
entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES);
if (!entry->busaddr) {
drm_free(entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES);
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
return -ENOMEM;
}
memset( (void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr) );
memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
entry->virtual = vmalloc_32( pages << PAGE_SHIFT );
if ( !entry->virtual ) {
drm_free( entry->busaddr,
entry->pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
drm_free( entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES );
drm_free( entry,
sizeof(*entry),
DRM_MEM_SGLISTS );
entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
if (!entry->virtual) {
drm_free(entry->busaddr,
entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
drm_free(entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES);
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
return -ENOMEM;
}
/* This also forces the mapping of COW pages, so our page list
* will be valid. Please don't remove it...
*/
memset( entry->virtual, 0, pages << PAGE_SHIFT );
memset(entry->virtual, 0, pages << PAGE_SHIFT);
entry->handle = (unsigned long)entry->virtual;
DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );
DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);
for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) {
for (i = entry->handle, j = 0; j < pages; i += PAGE_SIZE, j++) {
entry->pagelist[j] = vmalloc_to_page((void *)i);
if (!entry->pagelist[j])
goto failed;
@ -147,8 +139,8 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
request.handle = entry->handle;
if ( copy_to_user( argp, &request, sizeof(request) ) ) {
drm_sg_cleanup( entry );
if (copy_to_user(argp, &request, sizeof(request))) {
drm_sg_cleanup(entry);
return -EFAULT;
}
@ -159,50 +151,50 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
* versa.
*/
{
int error = 0;
int error = 0;
for ( i = 0 ; i < pages ; i++ ) {
unsigned long *tmp;
for (i = 0; i < pages; i++) {
unsigned long *tmp;
tmp = page_address( entry->pagelist[i] );
for ( j = 0 ;
j < PAGE_SIZE / sizeof(unsigned long) ;
j++, tmp++ ) {
*tmp = 0xcafebabe;
}
tmp = (unsigned long *)((u8 *)entry->virtual +
(PAGE_SIZE * i));
for( j = 0 ;
j < PAGE_SIZE / sizeof(unsigned long) ;
j++, tmp++ ) {
if ( *tmp != 0xcafebabe && error == 0 ) {
error = 1;
DRM_ERROR( "Scatter allocation error, "
"pagelist does not match "
"virtual mapping\n" );
tmp = page_address(entry->pagelist[i]);
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
*tmp = 0xcafebabe;
}
tmp = (unsigned long *)((u8 *) entry->virtual +
(PAGE_SIZE * i));
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
if (*tmp != 0xcafebabe && error == 0) {
error = 1;
DRM_ERROR("Scatter allocation error, "
"pagelist does not match "
"virtual mapping\n");
}
}
tmp = page_address(entry->pagelist[i]);
for (j = 0;
j < PAGE_SIZE / sizeof(unsigned long);
j++, tmp++) {
*tmp = 0;
}
}
tmp = page_address( entry->pagelist[i] );
for(j = 0 ;
j < PAGE_SIZE / sizeof(unsigned long) ;
j++, tmp++) {
*tmp = 0;
}
}
if (error == 0)
DRM_ERROR( "Scatter allocation matches pagelist\n" );
if (error == 0)
DRM_ERROR("Scatter allocation matches pagelist\n");
}
#endif
return 0;
failed:
drm_sg_cleanup( entry );
failed:
drm_sg_cleanup(entry);
return -ENOMEM;
}
int drm_sg_free( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
int drm_sg_free(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
@ -212,20 +204,20 @@ int drm_sg_free( struct inode *inode, struct file *filp,
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
if ( copy_from_user( &request,
(drm_scatter_gather_t __user *)arg,
sizeof(request) ) )
if (copy_from_user(&request,
(drm_scatter_gather_t __user *) arg,
sizeof(request)))
return -EFAULT;
entry = dev->sg;
dev->sg = NULL;
if ( !entry || entry->handle != request.handle )
if (!entry || entry->handle != request.handle)
return -EINVAL;
DRM_DEBUG( "sg free virtual = %p\n", entry->virtual );
DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
drm_sg_cleanup( entry );
drm_sg_cleanup(entry);
return 0;
}
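The DEBUG_SCATTER block above verifies the allocation by writing a pattern through the page list and reading it back through the vmalloc mapping. Condensed to its essence, as a sketch that checks only the first word of each page rather than a drop-in replacement:

static int verify_sg_mapping(drm_sg_mem_t * entry)
{
	unsigned long i;

	for (i = 0; i < entry->pages; i++) {
		unsigned long *via_page = page_address(entry->pagelist[i]);
		unsigned long *via_virt = (unsigned long *)
		    ((u8 *) entry->virtual + PAGE_SIZE * i);

		*via_page = 0xcafebabe;	/* write through the page list */
		if (*via_virt != 0xcafebabe)
			return -1;	/* the two views disagree */
		*via_page = 0;		/* restore the page */
	}
	return 0;
}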

View File

@ -32,14 +32,15 @@
*/
#include <linux/module.h>
#include "drmP.h"
#include "drm_core.h"
unsigned int cards_limit = 16; /* Enough for one machine */
unsigned int debug = 0; /* 1 to enable debug output */
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
MODULE_PARM_DESC(debug, "Enable debug output");
@ -51,20 +52,22 @@ drm_minor_t *drm_minors;
struct drm_sysfs_class *drm_class;
struct proc_dir_entry *drm_proc_root;
static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver_fn *driver_fn)
static int fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver_fn *driver_fn)
{
int retcode;
dev->count_lock = SPIN_LOCK_UNLOCKED;
init_timer( &dev->timer );
sema_init( &dev->struct_sem, 1 );
sema_init( &dev->ctxlist_sem, 1 );
init_timer(&dev->timer);
sema_init(&dev->struct_sem, 1);
sema_init(&dev->ctxlist_sem, 1);
dev->name = DRIVER_NAME;
dev->pdev = pdev;
dev->name = DRIVER_NAME;
dev->pdev = pdev;
#ifdef __alpha__
dev->hose = pdev->sysdata;
dev->hose = pdev->sysdata;
dev->pci_domain = dev->hose->bus->number;
#else
dev->pci_domain = 0;
@ -75,7 +78,8 @@ static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci
dev->irq = pdev->irq;
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
if(dev->maplist == NULL) return -ENOMEM;
if (dev->maplist == NULL)
return -ENOMEM;
INIT_LIST_HEAD(&dev->maplist->head);
/* the DRM has 6 counters */
@ -95,29 +99,29 @@ static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci
if (drm_core_has_AGP(dev)) {
dev->agp = drm_agp_init();
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
DRM_ERROR( "Cannot initialize the agpgart module.\n" );
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
retcode = -EINVAL;
goto error_out_unreg;
}
if (drm_core_has_MTRR(dev)) {
if (dev->agp)
dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size*1024*1024,
MTRR_TYPE_WRCOMB,
1 );
dev->agp->agp_mtrr =
mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024, MTRR_TYPE_WRCOMB, 1);
}
}
retcode = drm_ctxbitmap_init( dev );
if( retcode ) {
DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
goto error_out_unreg;
}
dev->device = MKDEV(DRM_MAJOR, dev->minor );
dev->device = MKDEV(DRM_MAJOR, dev->minor);
/* postinit is a required function to display the signon banner */
/* drivers add secondary heads here if needed */
@ -126,7 +130,7 @@ static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci
return 0;
error_out_unreg:
error_out_unreg:
drm_takedown(dev);
return retcode;
}
@ -142,7 +146,8 @@ static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci
* then register the character device and inter module information.
* Try and register, if we fail to register, backout previous work.
*/
int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver_fn *driver_fn)
int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver_fn *driver_fn)
{
struct class_device *dev_class;
drm_device_t *dev;
@ -157,17 +162,21 @@ int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_
DRM_DEBUG("assigning minor %d\n", minor);
dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
if(!dev)
if (!dev)
return -ENOMEM;
*minors = (drm_minor_t){.dev = dev, .class = DRM_MINOR_PRIMARY};
*minors = (drm_minor_t) {
.dev = dev,.class = DRM_MINOR_PRIMARY};
dev->minor = minor;
if ((ret = fill_in_dev(dev, pdev, ent, driver_fn))) {
printk (KERN_ERR "DRM: Fill_in_dev failed.\n");
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g1;
}
if ((ret = drm_proc_init(dev, minor, drm_proc_root, &minors->dev_root))) {
printk (KERN_ERR "DRM: Failed to initialize /proc/dri.\n");
if ((ret =
drm_proc_init(dev, minor, drm_proc_root,
&minors->dev_root))) {
printk(KERN_ERR
"DRM: Failed to initialize /proc/dri.\n");
goto err_g1;
}
if (!drm_fb_loaded) {
@ -176,9 +185,13 @@ int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_
pci_enable_device(pdev);
}
dev_class = drm_sysfs_device_add(drm_class,
MKDEV(DRM_MAJOR, minor), DRM_PCI_DEV(pdev), "card%d", minor);
MKDEV(DRM_MAJOR,
minor),
DRM_PCI_DEV(pdev),
"card%d", minor);
if (IS_ERR(dev_class)) {
printk (KERN_ERR "DRM: Error sysfs_device_add.\n");
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
ret = PTR_ERR(dev_class);
goto err_g2;
}
@ -189,15 +202,16 @@ int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_
}
DRM_ERROR("out of minors\n");
return -ENOMEM;
err_g2:
err_g2:
if (!drm_fb_loaded) {
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
drm_proc_cleanup(minor, drm_proc_root, minors->dev_root);
err_g1:
*minors = (drm_minor_t){.dev = NULL, .class = DRM_MINOR_FREE};
err_g1:
*minors = (drm_minor_t) {
.dev = NULL,.class = DRM_MINOR_FREE};
drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
return ret;
}
@ -214,7 +228,7 @@ EXPORT_SYMBOL(drm_probe);
* create the proc init entry via proc_init(). This routines assigns
* minor numbers to secondary heads of multi-headed cards
*/
int drm_get_secondary_minor(drm_device_t *dev, drm_minor_t **sec_minor)
int drm_get_secondary_minor(drm_device_t * dev, drm_minor_t ** sec_minor)
{
drm_minor_t *minors = &drm_minors[0];
struct class_device *dev_class;
@ -226,16 +240,24 @@ int drm_get_secondary_minor(drm_device_t *dev, drm_minor_t **sec_minor)
for (minor = 0; minor < cards_limit; minor++, minors++) {
if (minors->class == DRM_MINOR_FREE) {
*minors = (drm_minor_t){.dev = dev, .class = DRM_MINOR_SECONDARY};
if ((ret = drm_proc_init(dev, minor, drm_proc_root, &minors->dev_root))) {
printk (KERN_ERR "DRM: Failed to initialize /proc/dri.\n");
*minors = (drm_minor_t) {
.dev = dev,.class = DRM_MINOR_SECONDARY};
if ((ret =
drm_proc_init(dev, minor, drm_proc_root,
&minors->dev_root))) {
printk(KERN_ERR
"DRM: Failed to initialize /proc/dri.\n");
goto err_g1;
}
dev_class = drm_sysfs_device_add(drm_class,
MKDEV(DRM_MAJOR, minor), DRM_PCI_DEV(dev->pdev), "card%d", minor);
MKDEV(DRM_MAJOR,
minor),
DRM_PCI_DEV(dev->pdev),
"card%d", minor);
if (IS_ERR(dev_class)) {
printk (KERN_ERR "DRM: Error sysfs_device_add.\n");
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
ret = PTR_ERR(dev_class);
goto err_g2;
}
@ -247,10 +269,11 @@ int drm_get_secondary_minor(drm_device_t *dev, drm_minor_t **sec_minor)
}
DRM_ERROR("out of minors\n");
return -ENOMEM;
err_g2:
err_g2:
drm_proc_cleanup(minor, drm_proc_root, minors->dev_root);
err_g1:
*minors = (drm_minor_t){.dev = NULL, .class = DRM_MINOR_FREE};
err_g1:
*minors = (drm_minor_t) {
.dev = NULL,.class = DRM_MINOR_FREE};
drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
return ret;
}
@ -265,7 +288,7 @@ err_g1:
* "drm" data, otherwise unregisters the "drm" data, frees the dev list and
* unregisters the character device.
*/
int drm_put_minor(drm_device_t *dev)
int drm_put_minor(drm_device_t * dev)
{
drm_minor_t *minors = &drm_minors[dev->minor];
@ -274,7 +297,8 @@ int drm_put_minor(drm_device_t *dev)
drm_proc_cleanup(dev->minor, drm_proc_root, minors->dev_root);
drm_sysfs_device_remove(MKDEV(DRM_MAJOR, dev->minor));
*minors = (drm_minor_t){.dev = NULL, .class = DRM_MINOR_FREE};
*minors = (drm_minor_t) {
.dev = NULL,.class = DRM_MINOR_FREE};
drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
return 0;
@ -290,7 +314,7 @@ int drm_put_minor(drm_device_t *dev)
* last minor released.
*
*/
int drm_put_secondary_minor(drm_minor_t *sec_minor)
int drm_put_secondary_minor(drm_minor_t * sec_minor)
{
int minor = sec_minor - &drm_minors[0];
@ -299,8 +323,8 @@ int drm_put_secondary_minor(drm_minor_t *sec_minor)
drm_proc_cleanup(minor, drm_proc_root, sec_minor->dev_root);
drm_sysfs_device_remove(MKDEV(DRM_MAJOR, minor));
*sec_minor = (drm_minor_t){.dev = NULL, .class = DRM_MINOR_FREE};
*sec_minor = (drm_minor_t) {
.dev = NULL,.class = DRM_MINOR_FREE};
return 0;
}

View File

@ -56,8 +56,9 @@ static void drm_sysfs_class_release(struct class *class)
static ssize_t version_show(struct class *dev, char *buf)
{
return sprintf(buf, "%s %d.%d.%d %s\n", DRIVER_NAME, DRIVER_MAJOR,
DRIVER_MINOR, DRIVER_PATCHLEVEL, DRIVER_DATE);
DRIVER_MINOR, DRIVER_PATCHLEVEL, DRIVER_DATE);
}
static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
/**
@ -100,7 +101,7 @@ struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name)
return cs;
error:
error:
kfree(cs);
return ERR_PTR(retval);
}
@ -134,7 +135,9 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
* Note: the struct drm_sysfs_class passed to this function must have previously been
* created with a call to drm_sysfs_create().
*/
struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev, struct device *device, const char *fmt, ...)
struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
struct device *device,
const char *fmt, ...)
{
va_list args;
struct simple_dev *s_dev = NULL;
@ -171,7 +174,7 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
return &s_dev->class_dev;
error:
error:
kfree(s_dev);
return ERR_PTR(retval);
}
@ -203,4 +206,3 @@ void drm_sysfs_device_remove(dev_t dev)
spin_unlock(&simple_dev_list_lock);
}
}

View File

@ -35,7 +35,6 @@
#include "drmP.h"
/**
* \c nopage method for AGP virtual memory.
*
@ -48,27 +47,30 @@
*/
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
unsigned long address)
unsigned long address)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
struct list_head *list;
/*
* Find the right map
*/
* Find the right map
*/
if (!drm_core_has_AGP(dev))
goto vm_nopage_error;
if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;
if (!dev->agp || !dev->agp->cant_use_aperture)
goto vm_nopage_error;
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
map = r_list->map;
if (!map) continue;
if (map->offset == VM_OFFSET(vma)) break;
if (!map)
continue;
if (map->offset == VM_OFFSET(vma))
break;
}
if (map && map->type == _DRM_AGP) {
@ -79,48 +81,50 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
#ifdef __alpha__
/*
* Adjust to a bus-relative address
*/
* Adjust to a bus-relative address
*/
baddr -= dev->hose->mem_space->start;
#endif
/*
* It's AGP memory - find the real physical page to map
*/
for(agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
* It's AGP memory - find the real physical page to map
*/
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
if (agpmem->bound <= baddr &&
agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
break;
}
if (!agpmem) goto vm_nopage_error;
if (!agpmem)
goto vm_nopage_error;
/*
* Get the page, inc the use count, and return it
*/
* Get the page, inc the use count, and return it
*/
offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
page = virt_to_page(__va(agpmem->memory->memory[offset]));
get_page(page);
#if 0
/* page_count() not defined everywhere */
DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
baddr, __va(agpmem->memory->memory[offset]), offset,
page_count(page));
DRM_DEBUG
("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
baddr, __va(agpmem->memory->memory[offset]), offset,
page_count(page));
#endif
return page;
}
vm_nopage_error:
return NOPAGE_SIGBUS; /* Disallow mremap */
}
vm_nopage_error:
return NOPAGE_SIGBUS; /* Disallow mremap */
}
#else /* __OS_HAS_AGP */
#else /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
unsigned long address)
unsigned long address)
{
return NOPAGE_SIGBUS;
}
#endif /* __OS_HAS_AGP */
#endif /* __OS_HAS_AGP */
/**
* \c nopage method for shared virtual memory.
@ -133,17 +137,19 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
* return it.
*/
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address)
unsigned long address)
{
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
unsigned long offset;
unsigned long i;
struct page *page;
drm_map_t *map = (drm_map_t *) vma->vm_private_data;
unsigned long offset;
unsigned long i;
struct page *page;
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map) return NOPAGE_OOM; /* Nothing allocated */
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map)
return NOPAGE_OOM; /* Nothing allocated */
offset = address - vma->vm_start;
offset = address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
@ -154,7 +160,6 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
return page;
}
/**
* \c close method for shared virtual memory.
*
@ -165,8 +170,8 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
*/
void drm_vm_shm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_vma_entry_t *pt, *prev, *next;
drm_map_t *map;
drm_map_list_t *r_list;
@ -182,7 +187,8 @@ void drm_vm_shm_close(struct vm_area_struct *vma)
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
if (pt->vma->vm_private_data == map) found_maps++;
if (pt->vma->vm_private_data == map)
found_maps++;
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
@ -195,8 +201,7 @@ void drm_vm_shm_close(struct vm_area_struct *vma)
}
}
/* We were the only map that was found */
if(found_maps == 1 &&
map->flags & _DRM_REMOVABLE) {
if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
/* Check to see if we are in the maplist, if we are not, then
* we delete this mappings information.
*/
@ -204,10 +209,11 @@ void drm_vm_shm_close(struct vm_area_struct *vma)
list = &dev->maplist->head;
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
if (r_list->map == map) found_maps++;
if (r_list->map == map)
found_maps++;
}
if(!found_maps) {
if (!found_maps) {
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
@ -243,23 +249,25 @@ void drm_vm_shm_close(struct vm_area_struct *vma)
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address)
unsigned long address)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
unsigned long offset;
unsigned long page_nr;
struct page *page;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
unsigned long offset;
unsigned long page_nr;
struct page *page;
if (!dma) return NOPAGE_SIGBUS; /* Error */
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
if (!dma->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
if (!dma)
return NOPAGE_SIGBUS; /* Error */
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!dma->pagelist)
return NOPAGE_OOM; /* Nothing allocated */
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT;
page = virt_to_page((dma->pagelist[page_nr] +
(offset & (~PAGE_MASK))));
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT;
page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
get_page(page);
@ -277,9 +285,9 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address)
unsigned long address)
{
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
drm_map_t *map = (drm_map_t *) vma->vm_private_data;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_sg_mem_t *entry = dev->sg;
@ -288,10 +296,12 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long page_offset;
struct page *page;
if (!entry) return NOPAGE_SIGBUS; /* Error */
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
if (!entry->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
if (!entry)
return NOPAGE_SIGBUS; /* Error */
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!entry->pagelist)
return NOPAGE_OOM; /* Nothing allocated */
offset = address - vma->vm_start;
map_offset = map->offset - dev->sg->handle;
@ -302,95 +312,96 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
return page;
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type) {
if (type) *type = VM_FAULT_MINOR;
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_nopage(vma, address);
}
static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type) {
if (type) *type = VM_FAULT_MINOR;
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_shm_nopage(vma, address);
}
static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type) {
if (type) *type = VM_FAULT_MINOR;
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_dma_nopage(vma, address);
}
static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type) {
if (type) *type = VM_FAULT_MINOR;
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_sg_nopage(vma, address);
}
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int unused) {
unsigned long address, int unused)
{
return drm_do_vm_nopage(vma, address);
}
static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address,
int unused) {
unsigned long address, int unused)
{
return drm_do_vm_shm_nopage(vma, address);
}
static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address,
int unused) {
unsigned long address, int unused)
{
return drm_do_vm_dma_nopage(vma, address);
}
static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address,
int unused) {
unsigned long address, int unused)
{
return drm_do_vm_sg_nopage(vma, address);
}
#endif
/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
static struct vm_operations_struct drm_vm_ops = {
.nopage = drm_vm_nopage,
.open = drm_vm_open,
.close = drm_vm_close,
.open = drm_vm_open,
.close = drm_vm_close,
};
/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
static struct vm_operations_struct drm_vm_shm_ops = {
.nopage = drm_vm_shm_nopage,
.open = drm_vm_open,
.close = drm_vm_shm_close,
.open = drm_vm_open,
.close = drm_vm_shm_close,
};
/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
static struct vm_operations_struct drm_vm_dma_ops = {
.nopage = drm_vm_dma_nopage,
.open = drm_vm_open,
.close = drm_vm_close,
.open = drm_vm_open,
.close = drm_vm_close,
};
/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
static struct vm_operations_struct drm_vm_sg_ops = {
.nopage = drm_vm_sg_nopage,
.open = drm_vm_open,
.close = drm_vm_close,
.open = drm_vm_open,
.close = drm_vm_close,
};
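
For orientation only (editor's aside, not part of this commit): a minimal sketch of the 2.6-era fault path these four tables plug into, assuming the usual nopage protocol from <linux/mm.h>; the function name below is invented for illustration.

#include <linux/mm.h>

static struct page *example_drm_fault(struct vm_area_struct *vma,
				      unsigned long address)
{
	int type = VM_FAULT_MINOR;

	/* Dispatches to drm_vm_nopage(), drm_vm_shm_nopage(), drm_vm_dma_nopage()
	 * or drm_vm_sg_nopage(), depending on which table drm_mmap() installed
	 * for this vma; the handler returns a page it has already get_page()'d,
	 * or NOPAGE_SIGBUS / NOPAGE_OOM on error, exactly as coded above. */
	return vma->vm_ops->nopage(vma, address & PAGE_MASK, &type);
}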
/**
* \c open method for shared virtual memory.
*
@ -401,8 +412,8 @@ static struct vm_operations_struct drm_vm_sg_ops = {
*/
void drm_vm_open(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_vma_entry_t *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@ -412,10 +423,10 @@ void drm_vm_open(struct vm_area_struct *vma)
vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
down(&dev->struct_sem);
vma_entry->vma = vma;
vma_entry->vma = vma;
vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
up(&dev->struct_sem);
}
}
@ -430,8 +441,8 @@ void drm_vm_open(struct vm_area_struct *vma)
*/
void drm_vm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_vma_entry_t *pt, *prev;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@ -465,38 +476,38 @@ void drm_vm_close(struct vm_area_struct *vma)
*/
int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev;
drm_device_dma_t *dma;
unsigned long length = vma->vm_end - vma->vm_start;
unsigned long length = vma->vm_end - vma->vm_start;
lock_kernel();
dev = priv->dev;
dma = dev->dma;
dev = priv->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
/* Length must match exact page count */
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
unlock_kernel();
return -EINVAL;
}
unlock_kernel();
vma->vm_ops = &drm_vm_dma_ops;
vma->vm_ops = &drm_vm_dma_ops;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
vma->vm_file = filp; /* Needed for drm_vm_open() */
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open(vma);
return 0;
}
unsigned long drm_core_get_map_ofs(drm_map_t *map)
unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
return map->offset;
}
@ -527,17 +538,18 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
*/
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
unsigned long offset = 0;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
unsigned long offset = 0;
struct list_head *list;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
if ( !priv->authenticated ) return -EACCES;
if (!priv->authenticated)
return -EACCES;
/* We check for "dma". On Apple's UniNorth, it's valid to have
* the AGP mapped at physical address 0
@ -545,62 +557,68 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
*/
if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
&& (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
&& (!dev->agp
|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
)
return drm_mmap_dma(filp, vma);
/* A sequential search of a linked list is
fine here because: 1) there will only be
about 5-10 entries in the list and, 2) a
DRI client only has to do this mapping
once, so it doesn't have to be optimized
for performance, even if the list was a
bit longer. */
/* A sequential search of a linked list is
fine here because: 1) there will only be
about 5-10 entries in the list and, 2) a
DRI client only has to do this mapping
once, so it doesn't have to be optimized
for performance, even if the list was a
bit longer. */
list_for_each(list, &dev->maplist->head) {
unsigned long off;
r_list = list_entry(list, drm_map_list_t, head);
map = r_list->map;
if (!map) continue;
if (!map)
continue;
off = dev->fn_tbl->get_map_ofs(map);
if (off == VM_OFFSET(vma)) break;
if (off == VM_OFFSET(vma))
break;
}
if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
/* Check for valid size. */
if (map->size < vma->vm_end - vma->vm_start) return -EINVAL;
/* Check for valid size. */
if (map->size < vma->vm_end - vma->vm_start)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
/* Ye gads this is ugly. With more thought
we could move this up higher and use
`protection_map' instead. */
vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
__pte(pgprot_val(vma->vm_page_prot)))));
/* Ye gads this is ugly. With more thought
we could move this up higher and use
`protection_map' instead. */
vma->vm_page_prot =
__pgprot(pte_val
(pte_wrprotect
(__pte(pgprot_val(vma->vm_page_prot)))));
#endif
}
switch (map->type) {
case _DRM_AGP:
if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
/*
* On some platforms we can't talk to bus dma address from the CPU, so for
* memory of type DRM_AGP, we'll deal with sorting out the real physical
* pages and mappings in nopage()
*/
case _DRM_AGP:
if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
/*
* On some platforms we can't talk to bus dma address from the CPU, so for
* memory of type DRM_AGP, we'll deal with sorting out the real physical
* pages and mappings in nopage()
*/
#if defined(__powerpc__)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
vma->vm_ops = &drm_vm_ops;
break;
}
/* fall through to _DRM_FRAME_BUFFER... */
vma->vm_ops = &drm_vm_ops;
break;
}
/* fall through to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
if (VM_OFFSET(vma) >= __pa(high_memory)) {
@ -610,13 +628,15 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
}
#elif defined(__powerpc__)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
pgprot_val(vma->vm_page_prot) |=
_PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
vma->vm_flags |= VM_IO; /* not in core dump */
}
#if defined(__ia64__)
if (map->type != _DRM_AGP)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_page_prot =
pgprot_writecombine(vma->vm_page_prot);
#endif
offset = dev->fn_tbl->get_reg_ofs(dev);
#ifdef __sparc__
@ -630,7 +650,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
#endif
return -EAGAIN;
return -EAGAIN;
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%lx\n",
map->type,
@ -640,9 +660,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED;
#else
vma->vm_flags |= VM_RESERVED;
@ -651,22 +671,22 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
case _DRM_SCATTER_GATHER:
vma->vm_ops = &drm_vm_sg_ops;
vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED;
#else
vma->vm_flags |= VM_RESERVED;
#endif
break;
break;
default:
return -EINVAL; /* This should never happen. */
}
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
vma->vm_file = filp; /* Needed for drm_vm_open() */
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open(vma);
return 0;
}
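
As a hedged aside (not part of this commit), drm_mmap() above is what a DRI client reaches when it mmap()s the DRM file descriptor; the helper below is an invented userspace sketch, where map_offset is the offset previously obtained for the map (e.g. via libdrm's drmAddMap()/drmMap()).

#include <stddef.h>
#include <sys/mman.h>

static void *map_drm_region(int drm_fd, unsigned long map_offset, size_t size)
{
	/* drm_mmap() matches map_offset against dev->maplist and installs the
	 * matching vm_ops (AGP, SHM, DMA or scatter/gather) before returning. */
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 drm_fd, (off_t)map_offset);

	return (ptr == MAP_FAILED) ? NULL : ptr;
}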


@ -10,13 +10,10 @@
#include <linux/sched.h>
#include <asm/upa.h>
#include "ffb.h"
#include "drmP.h"
#include "ffb_drv.h"
static int DRM(alloc_queue)(drm_device_t *dev, int is_2d_only)
{
static int DRM(alloc_queue) (drm_device_t * dev, int is_2d_only) {
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int i;
@ -37,7 +34,7 @@ static int DRM(alloc_queue)(drm_device_t *dev, int is_2d_only)
return i + 1;
}
static void ffb_save_context(ffb_dev_priv_t *fpriv, int idx)
static void ffb_save_context(ffb_dev_priv_t * fpriv, int idx)
{
ffb_fbcPtr ffb = fpriv->regs;
struct ffb_hw_context *ctx;
@ -94,36 +91,36 @@ static void ffb_save_context(ffb_dev_priv_t *fpriv, int idx)
/* Capture rendering attributes. */
ctx->ppc = upa_readl(&ffb->ppc); /* Pixel Processor Control */
ctx->wid = upa_readl(&ffb->wid); /* Current WID */
ctx->fg = upa_readl(&ffb->fg); /* Constant FG color */
ctx->bg = upa_readl(&ffb->bg); /* Constant BG color */
ctx->consty = upa_readl(&ffb->consty); /* Constant Y */
ctx->constz = upa_readl(&ffb->constz); /* Constant Z */
ctx->xclip = upa_readl(&ffb->xclip); /* X plane clip */
ctx->dcss = upa_readl(&ffb->dcss); /* Depth Cue Scale Slope */
ctx->ppc = upa_readl(&ffb->ppc); /* Pixel Processor Control */
ctx->wid = upa_readl(&ffb->wid); /* Current WID */
ctx->fg = upa_readl(&ffb->fg); /* Constant FG color */
ctx->bg = upa_readl(&ffb->bg); /* Constant BG color */
ctx->consty = upa_readl(&ffb->consty); /* Constant Y */
ctx->constz = upa_readl(&ffb->constz); /* Constant Z */
ctx->xclip = upa_readl(&ffb->xclip); /* X plane clip */
ctx->dcss = upa_readl(&ffb->dcss); /* Depth Cue Scale Slope */
ctx->vclipmin = upa_readl(&ffb->vclipmin); /* Primary XY clip, minimum */
ctx->vclipmax = upa_readl(&ffb->vclipmax); /* Primary XY clip, maximum */
ctx->vclipzmin = upa_readl(&ffb->vclipzmin); /* Primary Z clip, minimum */
ctx->vclipzmax = upa_readl(&ffb->vclipzmax); /* Primary Z clip, maximum */
ctx->dcsf = upa_readl(&ffb->dcsf); /* Depth Cue Scale Front Bound */
ctx->dcsb = upa_readl(&ffb->dcsb); /* Depth Cue Scale Back Bound */
ctx->dczf = upa_readl(&ffb->dczf); /* Depth Cue Scale Z Front */
ctx->dczb = upa_readl(&ffb->dczb); /* Depth Cue Scale Z Back */
ctx->blendc = upa_readl(&ffb->blendc); /* Alpha Blend Control */
ctx->dcsf = upa_readl(&ffb->dcsf); /* Depth Cue Scale Front Bound */
ctx->dcsb = upa_readl(&ffb->dcsb); /* Depth Cue Scale Back Bound */
ctx->dczf = upa_readl(&ffb->dczf); /* Depth Cue Scale Z Front */
ctx->dczb = upa_readl(&ffb->dczb); /* Depth Cue Scale Z Back */
ctx->blendc = upa_readl(&ffb->blendc); /* Alpha Blend Control */
ctx->blendc1 = upa_readl(&ffb->blendc1); /* Alpha Blend Color 1 */
ctx->blendc2 = upa_readl(&ffb->blendc2); /* Alpha Blend Color 2 */
ctx->fbc = upa_readl(&ffb->fbc); /* Frame Buffer Control */
ctx->rop = upa_readl(&ffb->rop); /* Raster Operation */
ctx->cmp = upa_readl(&ffb->cmp); /* Compare Controls */
ctx->fbc = upa_readl(&ffb->fbc); /* Frame Buffer Control */
ctx->rop = upa_readl(&ffb->rop); /* Raster Operation */
ctx->cmp = upa_readl(&ffb->cmp); /* Compare Controls */
ctx->matchab = upa_readl(&ffb->matchab); /* Buffer A/B Match Ops */
ctx->matchc = upa_readl(&ffb->matchc); /* Buffer C Match Ops */
ctx->magnab = upa_readl(&ffb->magnab); /* Buffer A/B Magnitude Ops */
ctx->magnc = upa_readl(&ffb->magnc); /* Buffer C Magnitude Ops */
ctx->pmask = upa_readl(&ffb->pmask); /* RGB Plane Mask */
ctx->xpmask = upa_readl(&ffb->xpmask); /* X Plane Mask */
ctx->ypmask = upa_readl(&ffb->ypmask); /* Y Plane Mask */
ctx->zpmask = upa_readl(&ffb->zpmask); /* Z Plane Mask */
ctx->matchc = upa_readl(&ffb->matchc); /* Buffer C Match Ops */
ctx->magnab = upa_readl(&ffb->magnab); /* Buffer A/B Magnitude Ops */
ctx->magnc = upa_readl(&ffb->magnc); /* Buffer C Magnitude Ops */
ctx->pmask = upa_readl(&ffb->pmask); /* RGB Plane Mask */
ctx->xpmask = upa_readl(&ffb->xpmask); /* X Plane Mask */
ctx->ypmask = upa_readl(&ffb->ypmask); /* Y Plane Mask */
ctx->zpmask = upa_readl(&ffb->zpmask); /* Z Plane Mask */
/* Auxiliary Clips. */
ctx->auxclip0min = upa_readl(&ffb->auxclip[0].min);
@ -135,9 +132,9 @@ static void ffb_save_context(ffb_dev_priv_t *fpriv, int idx)
ctx->auxclip3min = upa_readl(&ffb->auxclip[3].min);
ctx->auxclip3max = upa_readl(&ffb->auxclip[3].max);
ctx->lpat = upa_readl(&ffb->lpat); /* Line Pattern */
ctx->fontxy = upa_readl(&ffb->fontxy); /* XY Font Coordinate */
ctx->fontw = upa_readl(&ffb->fontw); /* Font Width */
ctx->lpat = upa_readl(&ffb->lpat); /* Line Pattern */
ctx->fontxy = upa_readl(&ffb->fontxy); /* XY Font Coordinate */
ctx->fontw = upa_readl(&ffb->fontw); /* Font Width */
ctx->fontinc = upa_readl(&ffb->fontinc); /* Font X/Y Increment */
/* These registers/features only exist on FFB2 and later chips. */
@ -145,12 +142,12 @@ static void ffb_save_context(ffb_dev_priv_t *fpriv, int idx)
ctx->dcss1 = upa_readl(&ffb->dcss1); /* Depth Cue Scale Slope 1 */
ctx->dcss2 = upa_readl(&ffb->dcss2); /* Depth Cue Scale Slope 2 */
ctx->dcss3 = upa_readl(&ffb->dcss3); /* Depth Cue Scale Slope 3 */
ctx->dcs2 = upa_readl(&ffb->dcs2); /* Depth Cue Scale 2 */
ctx->dcs3 = upa_readl(&ffb->dcs3); /* Depth Cue Scale 3 */
ctx->dcs4 = upa_readl(&ffb->dcs4); /* Depth Cue Scale 4 */
ctx->dcd2 = upa_readl(&ffb->dcd2); /* Depth Cue Depth 2 */
ctx->dcd3 = upa_readl(&ffb->dcd3); /* Depth Cue Depth 3 */
ctx->dcd4 = upa_readl(&ffb->dcd4); /* Depth Cue Depth 4 */
ctx->dcs2 = upa_readl(&ffb->dcs2); /* Depth Cue Scale 2 */
ctx->dcs3 = upa_readl(&ffb->dcs3); /* Depth Cue Scale 3 */
ctx->dcs4 = upa_readl(&ffb->dcs4); /* Depth Cue Scale 4 */
ctx->dcd2 = upa_readl(&ffb->dcd2); /* Depth Cue Depth 2 */
ctx->dcd3 = upa_readl(&ffb->dcd3); /* Depth Cue Depth 3 */
ctx->dcd4 = upa_readl(&ffb->dcd4); /* Depth Cue Depth 4 */
/* And stencil/stencilctl only exists on FFB2+ and later
* due to the introduction of 3DRAM-III.
@ -170,7 +167,7 @@ static void ffb_save_context(ffb_dev_priv_t *fpriv, int idx)
ctx->ucsr = upa_readl(&ffb->ucsr);
}
static void ffb_restore_context(ffb_dev_priv_t *fpriv, int old, int idx)
static void ffb_restore_context(ffb_dev_priv_t * fpriv, int old, int idx)
{
ffb_fbcPtr ffb = fpriv->regs;
struct ffb_hw_context *ctx;
@ -193,7 +190,7 @@ static void ffb_restore_context(ffb_dev_priv_t *fpriv, int old, int idx)
upa_writel(ctx->ppc, &ffb->ppc);
upa_writel(ctx->wid, &ffb->wid);
upa_writel(ctx->fg, &ffb->fg);
upa_writel(ctx->fg, &ffb->fg);
upa_writel(ctx->bg, &ffb->bg);
upa_writel(ctx->xclip, &ffb->xclip);
upa_writel(ctx->fbc, &ffb->fbc);
@ -237,36 +234,36 @@ static void ffb_restore_context(ffb_dev_priv_t *fpriv, int old, int idx)
/* Restore rendering attributes. */
upa_writel(ctx->ppc, &ffb->ppc); /* Pixel Processor Control */
upa_writel(ctx->wid, &ffb->wid); /* Current WID */
upa_writel(ctx->fg, &ffb->fg); /* Constant FG color */
upa_writel(ctx->bg, &ffb->bg); /* Constant BG color */
upa_writel(ctx->consty, &ffb->consty); /* Constant Y */
upa_writel(ctx->constz, &ffb->constz); /* Constant Z */
upa_writel(ctx->xclip, &ffb->xclip); /* X plane clip */
upa_writel(ctx->dcss, &ffb->dcss); /* Depth Cue Scale Slope */
upa_writel(ctx->ppc, &ffb->ppc); /* Pixel Processor Control */
upa_writel(ctx->wid, &ffb->wid); /* Current WID */
upa_writel(ctx->fg, &ffb->fg); /* Constant FG color */
upa_writel(ctx->bg, &ffb->bg); /* Constant BG color */
upa_writel(ctx->consty, &ffb->consty); /* Constant Y */
upa_writel(ctx->constz, &ffb->constz); /* Constant Z */
upa_writel(ctx->xclip, &ffb->xclip); /* X plane clip */
upa_writel(ctx->dcss, &ffb->dcss); /* Depth Cue Scale Slope */
upa_writel(ctx->vclipmin, &ffb->vclipmin); /* Primary XY clip, minimum */
upa_writel(ctx->vclipmax, &ffb->vclipmax); /* Primary XY clip, maximum */
upa_writel(ctx->vclipzmin, &ffb->vclipzmin); /* Primary Z clip, minimum */
upa_writel(ctx->vclipzmax, &ffb->vclipzmax); /* Primary Z clip, maximum */
upa_writel(ctx->dcsf, &ffb->dcsf); /* Depth Cue Scale Front Bound */
upa_writel(ctx->dcsb, &ffb->dcsb); /* Depth Cue Scale Back Bound */
upa_writel(ctx->dczf, &ffb->dczf); /* Depth Cue Scale Z Front */
upa_writel(ctx->dczb, &ffb->dczb); /* Depth Cue Scale Z Back */
upa_writel(ctx->blendc, &ffb->blendc); /* Alpha Blend Control */
upa_writel(ctx->dcsf, &ffb->dcsf); /* Depth Cue Scale Front Bound */
upa_writel(ctx->dcsb, &ffb->dcsb); /* Depth Cue Scale Back Bound */
upa_writel(ctx->dczf, &ffb->dczf); /* Depth Cue Scale Z Front */
upa_writel(ctx->dczb, &ffb->dczb); /* Depth Cue Scale Z Back */
upa_writel(ctx->blendc, &ffb->blendc); /* Alpha Blend Control */
upa_writel(ctx->blendc1, &ffb->blendc1); /* Alpha Blend Color 1 */
upa_writel(ctx->blendc2, &ffb->blendc2); /* Alpha Blend Color 2 */
upa_writel(ctx->fbc, &ffb->fbc); /* Frame Buffer Control */
upa_writel(ctx->rop, &ffb->rop); /* Raster Operation */
upa_writel(ctx->cmp, &ffb->cmp); /* Compare Controls */
upa_writel(ctx->fbc, &ffb->fbc); /* Frame Buffer Control */
upa_writel(ctx->rop, &ffb->rop); /* Raster Operation */
upa_writel(ctx->cmp, &ffb->cmp); /* Compare Controls */
upa_writel(ctx->matchab, &ffb->matchab); /* Buffer A/B Match Ops */
upa_writel(ctx->matchc, &ffb->matchc); /* Buffer C Match Ops */
upa_writel(ctx->magnab, &ffb->magnab); /* Buffer A/B Magnitude Ops */
upa_writel(ctx->magnc, &ffb->magnc); /* Buffer C Magnitude Ops */
upa_writel(ctx->pmask, &ffb->pmask); /* RGB Plane Mask */
upa_writel(ctx->xpmask, &ffb->xpmask); /* X Plane Mask */
upa_writel(ctx->ypmask, &ffb->ypmask); /* Y Plane Mask */
upa_writel(ctx->zpmask, &ffb->zpmask); /* Z Plane Mask */
upa_writel(ctx->matchc, &ffb->matchc); /* Buffer C Match Ops */
upa_writel(ctx->magnab, &ffb->magnab); /* Buffer A/B Magnitude Ops */
upa_writel(ctx->magnc, &ffb->magnc); /* Buffer C Magnitude Ops */
upa_writel(ctx->pmask, &ffb->pmask); /* RGB Plane Mask */
upa_writel(ctx->xpmask, &ffb->xpmask); /* X Plane Mask */
upa_writel(ctx->ypmask, &ffb->ypmask); /* Y Plane Mask */
upa_writel(ctx->zpmask, &ffb->zpmask); /* Z Plane Mask */
/* Auxiliary Clips. */
upa_writel(ctx->auxclip0min, &ffb->auxclip[0].min);
@ -278,9 +275,9 @@ static void ffb_restore_context(ffb_dev_priv_t *fpriv, int old, int idx)
upa_writel(ctx->auxclip3min, &ffb->auxclip[3].min);
upa_writel(ctx->auxclip3max, &ffb->auxclip[3].max);
upa_writel(ctx->lpat, &ffb->lpat); /* Line Pattern */
upa_writel(ctx->fontxy, &ffb->fontxy); /* XY Font Coordinate */
upa_writel(ctx->fontw, &ffb->fontw); /* Font Width */
upa_writel(ctx->lpat, &ffb->lpat); /* Line Pattern */
upa_writel(ctx->fontxy, &ffb->fontxy); /* XY Font Coordinate */
upa_writel(ctx->fontw, &ffb->fontw); /* Font Width */
upa_writel(ctx->fontinc, &ffb->fontinc); /* Font X/Y Increment */
/* These registers/features only exist on FFB2 and later chips. */
@ -354,20 +351,18 @@ static void FFBWait(ffb_fbcPtr ffb)
} while (--limit);
}
int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
int DRM(context_switch) (drm_device_t * dev, int old, int new) {
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
#if DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context ||
dev->last_context == 0) {
if (new == dev->last_context || dev->last_context == 0) {
dev->last_context = new;
return 0;
return 0;
}
FFBWait(fpriv->regs);
@ -377,68 +372,62 @@ int DRM(context_switch)(drm_device_t *dev, int old, int new)
dev->last_context = new;
return 0;
return 0;
}
int DRM(resctx)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
int DRM(resctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
if (copy_from_user(&res, (drm_ctx_res_t __user *)arg, sizeof(res)))
if (copy_from_user(&res, (drm_ctx_res_t __user *) arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i)))
if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
if (copy_to_user((drm_ctx_res_t __user *)arg, &res, sizeof(res)))
if (copy_to_user((drm_ctx_res_t __user *) arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
int DRM(addctx)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
int DRM(addctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
int idx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
idx = DRM(alloc_queue)(dev, (ctx.flags & _DRM_CONTEXT_2DONLY));
idx = DRM(alloc_queue) (dev, (ctx.flags & _DRM_CONTEXT_2DONLY));
if (idx < 0)
return -ENFILE;
DRM_DEBUG("%d\n", ctx.handle);
ctx.handle = idx;
if (copy_to_user((drm_ctx_t __user *)arg, &ctx, sizeof(ctx)))
if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
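
A hedged userspace sketch (again not part of this commit) of how the handler above is typically reached: the client issues DRM_IOCTL_ADD_CTX on the ffb device node and reads back the handle chosen by DRM(alloc_queue). drm_ctx_t, _DRM_CONTEXT_2DONLY and DRM_IOCTL_ADD_CTX come from the shared drm.h; the helper name is invented.

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"		/* drm_ctx_t, DRM_IOCTL_ADD_CTX, _DRM_CONTEXT_2DONLY */

static int create_ffb_context(int drm_fd, int is_2d_only)
{
	drm_ctx_t ctx;

	memset(&ctx, 0, sizeof(ctx));
	if (is_2d_only)
		ctx.flags = _DRM_CONTEXT_2DONLY;
	if (ioctl(drm_fd, DRM_IOCTL_ADD_CTX, &ctx) < 0)
		return -1;
	return ctx.handle;	/* set to idx by DRM(addctx) on success */
}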
int DRM(modctx)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int DRM(modctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
struct ffb_hw_context *hwctx;
drm_ctx_t ctx;
int idx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
idx = ctx.handle;
@ -457,17 +446,16 @@ int DRM(modctx)(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
int DRM(getctx)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int DRM(getctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
struct ffb_hw_context *hwctx;
drm_ctx_t ctx;
int idx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
idx = ctx.handle;
@ -483,47 +471,44 @@ int DRM(getctx)(struct inode *inode, struct file *filp, unsigned int cmd,
else
ctx.flags = 0;
if (copy_to_user((drm_ctx_t __user *)arg, &ctx, sizeof(ctx)))
if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int DRM(switchctx)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
int DRM(switchctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
return DRM(context_switch) (dev, dev->last_context, ctx.handle);
}
int DRM(newctx)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
int DRM(newctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return 0;
}
int DRM(rmctx)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int DRM(rmctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_ctx_t ctx;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int idx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
@ -538,7 +523,7 @@ int DRM(rmctx)(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
static void ffb_driver_release(drm_device_t *dev)
static void ffb_driver_release(drm_device_t * dev)
{
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
@ -546,28 +531,30 @@ static void ffb_driver_release(drm_device_t *dev)
idx = context - 1;
if (fpriv &&
context != DRM_KERNEL_CONTEXT &&
fpriv->hw_state[idx] != NULL) {
context != DRM_KERNEL_CONTEXT && fpriv->hw_state[idx] != NULL) {
kfree(fpriv->hw_state[idx]);
fpriv->hw_state[idx] = NULL;
}
}
static int ffb_driver_presetup(drm_device_t *dev)
static int ffb_driver_presetup(drm_device_t * dev)
{
int ret;
ret = ffb_presetup(dev);
if (ret != 0) return ret;
if (ret != 0)
return ret;
return 0;
}
static void ffb_driver_pretakedown(drm_device_t *dev)
static void ffb_driver_pretakedown(drm_device_t * dev)
{
if (dev->dev_private) kfree(dev->dev_private);
if (dev->dev_private)
kfree(dev->dev_private);
}
static void ffb_driver_postcleanup(drm_device_t *dev)
static void ffb_driver_postcleanup(drm_device_t * dev)
{
if (ffb_position != NULL) kfree(ffb_position);
if (ffb_position != NULL)
kfree(ffb_position);
}
static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev)
@ -579,22 +566,22 @@ static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev)
ctx = lock.context;
do {
old = *plock;
new = ctx;
old = *plock;
new = ctx;
prev = cmpxchg(plock, old, new);
} while (prev != old);
}
wake_up_interruptible(&dev->lock.lock_queue);
}
unsigned long ffb_driver_get_map_ofs(drm_map_t *map)
unsigned long ffb_driver_get_map_ofs(drm_map_t * map)
{
return (map->offset & 0xffffffff);
}
unsigned long ffb_driver_get_reg_ofs(drm_device_t *dev)
unsigned long ffb_driver_get_reg_ofs(drm_device_t * dev)
{
ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *)dev->dev_private;
ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private;
if (ffb_priv)
return ffb_priv->card_phys_base;


@ -5,17 +5,15 @@
*/
#include <linux/config.h>
#include "ffb.h"
#include "drmP.h"
#include "ffb_drv.h"
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/shmparam.h>
#include <asm/oplib.h>
#include <asm/upa.h>
#include "drmP.h"
#include "ffb_drv.h"
#define DRIVER_AUTHOR "David S. Miller"
#define DRIVER_NAME "ffb"

File diff suppressed because it is too large

@ -19,15 +19,14 @@
#define I810_LOG_MIN_TEX_REGION_SIZE 16
#endif
#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define I810_UPLOAD_CTX 0x4
#define I810_UPLOAD_BUFFERS 0x8
#define I810_UPLOAD_TEX0 0x10
#define I810_UPLOAD_TEX1 0x20
#define I810_UPLOAD_CLIPRECTS 0x40
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
@ -56,12 +55,12 @@
*/
#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
#define I810_CTXREG_CF1 1
#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
#define I810_CTXREG_ST1 3
#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
@ -98,7 +97,7 @@ typedef enum _drm_i810_init_func {
I810_INIT_DMA = 0x01,
I810_CLEANUP_DMA = 0x02,
I810_INIT_DMA_1_4 = 0x03
} drm_i810_init_func_t;
} drm_i810_init_func_t;
/* This is the init structure after v1.2 */
typedef struct _drm_i810_init {
@ -127,12 +126,12 @@ typedef struct _drm_i810_init {
/* This is the init structure prior to v1.2 */
typedef struct _drm_i810_pre12_init {
drm_i810_init_func_t func;
drm_i810_init_func_t func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int ring_map_idx;
int buffer_map_idx;
#else
unsigned int mmio_offset;
unsigned int mmio_offset;
unsigned int buffers_offset;
#endif
int sarea_priv_offset;
@ -152,16 +151,16 @@ typedef struct _drm_i810_pre12_init {
* structure as well */
typedef struct _drm_i810_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char in_use; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_i810_tex_region_t;
typedef struct _drm_i810_sarea {
unsigned int ContextState[I810_CTX_SETUP_SIZE];
unsigned int BufferState[I810_DEST_SETUP_SIZE];
unsigned int TexState[2][I810_TEX_SETUP_SIZE];
unsigned int dirty;
unsigned int ContextState[I810_CTX_SETUP_SIZE];
unsigned int BufferState[I810_DEST_SETUP_SIZE];
unsigned int TexState[2][I810_TEX_SETUP_SIZE];
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
@ -182,19 +181,19 @@ typedef struct _drm_i810_sarea {
* them all in LRU order.
*/
drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS+1];
/* Last elt is sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
/* Last elt is sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
int vertex_prim;
int pf_enabled; /* is pageflipping allowed? */
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int pf_current_page; /* which buffer is being displayed? */
} drm_i810_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
@ -248,13 +247,13 @@ typedef struct _drm_i810_clear {
* new set of cliprects.
*/
typedef struct _drm_i810_vertex {
int idx; /* buffer index */
int idx; /* buffer index */
int used; /* nr bytes in use */
int discard; /* client is finished with the buffer? */
} drm_i810_vertex_t;
typedef struct _drm_i810_copy_t {
int idx; /* buffer index */
int idx; /* buffer index */
int used; /* nr bytes in use */
void *address; /* Address to copy from */
} drm_i810_copy_t;
@ -269,7 +268,6 @@ typedef struct _drm_i810_copy_t {
#define PR_RECTS (0x7<<18)
#define PR_MASK (0x7<<18)
typedef struct drm_i810_dma {
void *virtual;
int request_idx;
@ -278,17 +276,16 @@ typedef struct drm_i810_dma {
} drm_i810_dma_t;
typedef struct _drm_i810_overlay_t {
unsigned int offset; /* Address of the Overlay Regs */
unsigned int offset; /* Address of the Overlay Regs */
unsigned int physical;
} drm_i810_overlay_t;
typedef struct _drm_i810_mc {
int idx; /* buffer index */
int used; /* nr bytes in use */
int num_blocks; /* number of GFXBlocks */
int *length; /* List of lengths for GFXBlocks (FUTURE)*/
unsigned int last_render; /* Last Render Request */
int idx; /* buffer index */
int used; /* nr bytes in use */
int num_blocks; /* number of GFXBlocks */
int *length; /* List of lengths for GFXBlocks (FUTURE) */
unsigned int last_render; /* Last Render Request */
} drm_i810_mc_t;
#endif /* _I810_DRM_H_ */
#endif /* _I810_DRM_H_ */


@ -38,7 +38,7 @@
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
static int postinit(struct drm_device *dev, unsigned long flags)
{
/* i810 has 4 more counters */
dev->counters += 4;
@ -47,28 +47,26 @@ static int postinit( struct drm_device *dev, unsigned long flags )
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -77,25 +75,27 @@ static struct pci_device_id pciidlist[] = {
};
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_I810_INIT)] = { i810_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_I810_VERTEX)] = { i810_dma_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_CLEAR)] = { i810_clear_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_FLUSH)] = { i810_flush_ioctl, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_GETAGE)] = { i810_getage, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_GETBUF)] = { i810_getbuf, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_SWAP)] = { i810_swap_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_COPY)] = { i810_copybuf, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_DOCOPY)] = { i810_docopy, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_OV0INFO)] = { i810_ov0_info, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_FSTATUS)] = { i810_fstatus, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_MC)] = { i810_dma_mc, 1, 1 },
[DRM_IOCTL_NR(DRM_I810_RSTATUS)] = { i810_rstatus, 1, 0 },
[DRM_IOCTL_NR(DRM_I810_FLIP)] = { i810_flip_bufs, 1, 0 }
[DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, 1, 1},
[DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, 1, 0},
[DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, 1, 0},
[DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, 1, 0},
[DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, 1, 0},
[DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, 1, 0},
[DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, 1, 0},
[DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, 1, 0},
[DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, 1, 0},
[DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, 1, 0},
[DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, 1, 0},
[DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, 1, 0},
[DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, 1, 1},
[DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, 1, 0},
[DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, 1, 0}
};
static struct drm_driver_fn driver_fn = {
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.pretakedown = i810_driver_pretakedown,
.release = i810_driver_release,
@ -108,13 +108,14 @@ static struct drm_driver_fn driver_fn = {
.ioctls = ioctls,
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
}
,
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -123,10 +124,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init i810_init(void)
@ -142,6 +143,6 @@ static void __exit i810_exit(void)
module_init(i810_init);
module_exit(i810_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -56,14 +56,14 @@
#define DRIVER_PATCHLEVEL 0
typedef struct drm_i810_buf_priv {
u32 *in_use;
int my_use_idx;
u32 *in_use;
int my_use_idx;
int currently_mapped;
void *virtual;
void *kernel_virtual;
} drm_i810_buf_priv_t;
typedef struct _drm_i810_ring_buffer{
typedef struct _drm_i810_ring_buffer {
int tail_mask;
unsigned long Start;
unsigned long End;
@ -79,16 +79,15 @@ typedef struct drm_i810_private {
drm_map_t *mmio_map;
drm_i810_sarea_t *sarea_priv;
drm_i810_ring_buffer_t ring;
drm_i810_ring_buffer_t ring;
void *hw_status_page;
unsigned long counter;
void *hw_status_page;
unsigned long counter;
dma_addr_t dma_status_page;
drm_buf_t *mmap_buffer;
u32 front_di1, back_di1, zi1;
int back_offset;
@ -97,7 +96,7 @@ typedef struct drm_i810_private {
int overlay_physical;
int w, h;
int pitch;
int back_pitch;
int back_pitch;
int depth_pitch;
int do_boxes;
@ -107,24 +106,24 @@ typedef struct drm_i810_private {
int page_flipping;
wait_queue_head_t irq_queue;
atomic_t irq_received;
atomic_t irq_emitted;
atomic_t irq_received;
atomic_t irq_emitted;
int front_offset;
int front_offset;
} drm_i810_private_t;
/* i810_dma.c */
extern int i810_dma_schedule(drm_device_t *dev, int locked);
extern int i810_getbuf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_dma_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_dma_cleanup(drm_device_t *dev);
extern int i810_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_dma_schedule(drm_device_t * dev, int locked);
extern int i810_getbuf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_dma_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_dma_cleanup(drm_device_t * dev);
extern int i810_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void i810_reclaim_buffers(struct file *filp);
extern int i810_getage(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_getage(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
/* Obsolete:
@ -139,32 +138,31 @@ extern int i810_docopy(struct inode *inode, struct file *filp,
extern int i810_rstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_ov0_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
unsigned int cmd, unsigned long arg);
extern int i810_fstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i810_ov0_flip(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
unsigned int cmd, unsigned long arg);
extern int i810_dma_mc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
unsigned int cmd, unsigned long arg);
extern void i810_dma_quiescent(drm_device_t *dev);
extern void i810_dma_quiescent(drm_device_t * dev);
extern int i810_dma_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
unsigned int cmd, unsigned long arg);
extern int i810_swap_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
unsigned int cmd, unsigned long arg);
extern int i810_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
unsigned int cmd, unsigned long arg);
extern int i810_flip_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
unsigned int cmd, unsigned long arg);
extern int i810_driver_dma_quiescent(drm_device_t *dev);
extern void i810_driver_release(drm_device_t *dev, struct file *filp);
extern void i810_driver_pretakedown(drm_device_t *dev);
extern int i810_driver_dma_quiescent(drm_device_t * dev);
extern void i810_driver_release(drm_device_t * dev, struct file *filp);
extern void i810_driver_pretakedown(drm_device_t * dev);
#define I810_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle)
@ -214,7 +212,6 @@ extern void i810_driver_pretakedown(drm_device_t *dev);
#define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)

File diff suppressed because it is too large

@ -33,21 +33,21 @@
#define I830_UPLOAD_CTX 0x1
#define I830_UPLOAD_BUFFERS 0x2
#define I830_UPLOAD_CLIPRECTS 0x4
#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
#define I830_UPLOAD_TEX0 0x10000
#define I830_UPLOAD_TEX1 0x20000
#define I830_UPLOAD_TEX2 0x40000
#define I830_UPLOAD_TEX3 0x80000
#define I830_UPLOAD_TEX0 0x10000
#define I830_UPLOAD_TEX1 0x20000
#define I830_UPLOAD_TEX2 0x40000
#define I830_UPLOAD_TEX3 0x80000
#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
#define I830_UPLOAD_TEX_MASK 0xf0000
#define I830_UPLOAD_TEXBLEND0 0x100000
@ -103,7 +103,7 @@
#define I830_CTXREG_AA 9
#define I830_CTXREG_FOGCOLOR 10
#define I830_CTXREG_BLENDCOLR0 11
#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
#define I830_CTXREG_VF 13
#define I830_CTXREG_VF2 14
#define I830_CTXREG_MCSB0 15
@ -116,7 +116,6 @@
#define I830_STPREG_ST1 1
#define I830_STP_SETUP_SIZE 2
/* Texture state (per tex unit)
*/
@ -132,23 +131,23 @@
#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
#define I830_TEX_SETUP_SIZE 10
#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
#define I830_TEXREG_TM0S0 1
#define I830_TEXREG_TM0S1 2
#define I830_TEXREG_TM0S2 3
#define I830_TEXREG_TM0S3 4
#define I830_TEXREG_TM0S4 5
#define I830_TEXREG_NOP0 6 /* noop */
#define I830_TEXREG_NOP1 7 /* noop */
#define I830_TEXREG_NOP2 8 /* noop */
#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
#define I830_TEXREG_NOP0 6 /* noop */
#define I830_TEXREG_NOP1 7 /* noop */
#define I830_TEXREG_NOP2 8 /* noop */
#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
#define __I830_TEX_SETUP_SIZE 10
#define I830_FRONT 0x1
#define I830_BACK 0x2
#define I830_DEPTH 0x4
#endif /* _I830_DEFINES_ */
#endif /* _I830_DEFINES_ */
typedef struct _drm_i830_init {
enum {
@ -177,19 +176,19 @@ typedef struct _drm_i830_init {
* structure as well */
typedef struct _drm_i830_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char in_use; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_i830_tex_region_t;
typedef struct _drm_i830_sarea {
unsigned int ContextState[I830_CTX_SETUP_SIZE];
unsigned int BufferState[I830_DEST_SETUP_SIZE];
unsigned int BufferState[I830_DEST_SETUP_SIZE];
unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
unsigned int Palette[2][256];
unsigned int dirty;
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[I830_NR_SAREA_CLIPRECTS];
@ -210,23 +209,23 @@ typedef struct _drm_i830_sarea {
* them all in LRU order.
*/
drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS+1];
/* Last elt is sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
/* Last elt is sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
int vertex_prim;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
int perf_boxes; /* performance boxes to be displayed */
/* Here's the state for texunits 2,3:
/* Here's the state for texunits 2,3:
*/
unsigned int TexState2[I830_TEX_SETUP_SIZE];
unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
@ -241,26 +240,25 @@ typedef struct _drm_i830_sarea {
/* Flags for perf_boxes
*/
#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
#define I830_BOX_FLIP 0x2 /* populated by kernel */
#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
#define I830_BOX_FLIP 0x2 /* populated by kernel */
#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
/* I830 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_I830_INIT 0x00
#define DRM_I830_VERTEX 0x01
#define DRM_I830_CLEAR 0x02
#define DRM_I830_FLUSH 0x03
#define DRM_I830_GETAGE 0x04
#define DRM_I830_GETBUF 0x05
#define DRM_I830_SWAP 0x06
#define DRM_I830_COPY 0x07
#define DRM_I830_DOCOPY 0x08
#define DRM_I830_FLIP 0x09
#define DRM_I830_INIT 0x00
#define DRM_I830_VERTEX 0x01
#define DRM_I830_CLEAR 0x02
#define DRM_I830_FLUSH 0x03
#define DRM_I830_GETAGE 0x04
#define DRM_I830_GETBUF 0x05
#define DRM_I830_SWAP 0x06
#define DRM_I830_COPY 0x07
#define DRM_I830_DOCOPY 0x08
#define DRM_I830_FLIP 0x09
#define DRM_I830_IRQ_EMIT 0x0a
#define DRM_I830_IRQ_WAIT 0x0b
#define DRM_I830_GETPARAM 0x0c
@ -289,23 +287,21 @@ typedef struct _drm_i830_clear {
unsigned int clear_depthmask;
} drm_i830_clear_t;
/* These may be placeholders if we have more cliprects than
* I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
* false, indicating that the buffer will be dispatched again with a
* new set of cliprects.
*/
typedef struct _drm_i830_vertex {
int idx; /* buffer index */
int idx; /* buffer index */
int used; /* nr bytes in use */
int discard; /* client is finished with the buffer? */
} drm_i830_vertex_t;
typedef struct _drm_i830_copy_t {
int idx; /* buffer index */
int idx; /* buffer index */
int used; /* nr bytes in use */
void __user *address; /* Address to copy from */
void __user *address; /* Address to copy from */
} drm_i830_copy_t;
typedef struct drm_i830_dma {
@ -315,7 +311,6 @@ typedef struct drm_i830_dma {
int granted;
} drm_i830_dma_t;
/* 1.3: Userspace can request & wait on irq's:
*/
typedef struct drm_i830_irq_emit {
@ -326,7 +321,6 @@ typedef struct drm_i830_irq_wait {
int irq_seq;
} drm_i830_irq_wait_t;
/* 1.3: New ioctl to query kernel params:
*/
#define I830_PARAM_IRQ_ACTIVE 1
@ -336,7 +330,6 @@ typedef struct drm_i830_getparam {
int __user *value;
} drm_i830_getparam_t;
/* 1.3: New ioctl to set kernel params:
*/
#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
@ -346,5 +339,4 @@ typedef struct drm_i830_setparam {
int value;
} drm_i830_setparam_t;
#endif /* _I830_DRM_H_ */
#endif /* _I830_DRM_H_ */


@ -33,6 +33,7 @@
*/
#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
@ -40,7 +41,7 @@
#include "drm_pciids.h"
int postinit( struct drm_device *dev, unsigned long flags )
int postinit(struct drm_device *dev, unsigned long flags)
{
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@ -48,28 +49,26 @@ int postinit( struct drm_device *dev, unsigned long flags )
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -78,24 +77,26 @@ static struct pci_device_id pciidlist[] = {
};
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_I830_INIT)] = { i830_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_I830_VERTEX)] = { i830_dma_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_CLEAR)] = { i830_clear_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_GETAGE)] = { i830_getage, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_GETBUF)] = { i830_getbuf, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_SWAP)] = { i830_swap_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_COPY)] = { i830_copybuf, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_DOCOPY)] = { i830_docopy, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_FLIP)] = { i830_flip_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = { i830_irq_emit, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = { i830_irq_wait, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_GETPARAM)] = { i830_getparam, 1, 0 },
[DRM_IOCTL_NR(DRM_I830_SETPARAM)] = { i830_setparam, 1, 0 }
[DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, 1, 1},
[DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, 1, 0},
[DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, 1, 0},
[DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, 1, 0},
[DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, 1, 0},
[DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, 1, 0},
[DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, 1, 0},
[DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, 1, 0},
[DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, 1, 0},
[DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, 1, 0},
[DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, 1, 0},
[DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, 1, 0},
[DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, 1, 0},
[DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, 1, 0}
};
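The table above binds each DRM_I830_* command number to a handler and two permission flags. As a hedged sketch of how such a table is consumed (the struct and field names below, func/auth_needed/root_only, are illustrative assumptions, not the real drmP.h definitions):

/* Hypothetical sketch only; the real drm_ioctl_desc_t lives in drmP.h. */
typedef int (*sketch_ioctl_fn_t)(struct inode *, struct file *,
				 unsigned int, unsigned long);

struct sketch_ioctl_desc {
	sketch_ioctl_fn_t func;	/* handler, e.g. i830_dma_init */
	int auth_needed;	/* second column in the initializers above */
	int root_only;		/* third column in the initializers above */
};

static int sketch_dispatch(const struct sketch_ioctl_desc *table, int count,
			   struct inode *inode, struct file *filp,
			   unsigned int cmd, unsigned long arg)
{
	int nr = DRM_IOCTL_NR(cmd);	/* low byte of cmd selects the slot */

	if (nr >= count || !table[nr].func)
		return -EINVAL;
	/* the real drm_ioctl would check auth_needed/root_only here */
	return table[nr].func(inode, filp, cmd, arg);
}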
static struct drm_driver_fn driver_fn = {
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
#if USE_IRQS
.driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
#endif
@ -117,13 +118,14 @@ static struct drm_driver_fn driver_fn = {
.ioctls = ioctls,
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = i830_mmap_buffers,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = i830_mmap_buffers,
.fasync = drm_fasync,
}
,
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -132,10 +134,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init i830_init(void)
@ -151,6 +153,6 @@ static void __exit i830_exit(void)
module_init(i830_init);
module_exit(i830_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -63,14 +63,14 @@
#define USE_IRQS 0
typedef struct drm_i830_buf_priv {
u32 *in_use;
int my_use_idx;
u32 *in_use;
int my_use_idx;
int currently_mapped;
void __user *virtual;
void *kernel_virtual;
} drm_i830_buf_priv_t;
typedef struct _drm_i830_ring_buffer{
typedef struct _drm_i830_ring_buffer {
int tail_mask;
unsigned long Start;
unsigned long End;
@ -86,10 +86,10 @@ typedef struct drm_i830_private {
drm_map_t *mmio_map;
drm_i830_sarea_t *sarea_priv;
drm_i830_ring_buffer_t ring;
drm_i830_ring_buffer_t ring;
void * hw_status_page;
unsigned long counter;
void *hw_status_page;
unsigned long counter;
dma_addr_t dma_status_page;
@ -113,66 +113,66 @@ typedef struct drm_i830_private {
int page_flipping;
wait_queue_head_t irq_queue;
atomic_t irq_received;
atomic_t irq_emitted;
atomic_t irq_received;
atomic_t irq_emitted;
int use_mi_batchbuffer_start;
} drm_i830_private_t;
/* i830_dma.c */
extern int i830_dma_schedule(drm_device_t *dev, int locked);
extern int i830_getbuf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_dma_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_dma_cleanup(drm_device_t *dev);
extern int i830_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_dma_schedule(drm_device_t * dev, int locked);
extern int i830_getbuf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_dma_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_dma_cleanup(drm_device_t * dev);
extern int i830_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void i830_reclaim_buffers(struct file *filp);
extern int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
extern int i830_copybuf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_docopy(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void i830_dma_quiescent(drm_device_t *dev);
extern void i830_dma_quiescent(drm_device_t * dev);
extern int i830_dma_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
unsigned int cmd, unsigned long arg);
extern int i830_swap_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_flip_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_getparam(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_getparam( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int i830_setparam( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int i830_setparam(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* i830_irq.c */
extern int i830_irq_emit( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int i830_irq_wait( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int i830_wait_irq(drm_device_t *dev, int irq_nr);
extern int i830_emit_irq(drm_device_t *dev);
extern int i830_irq_emit(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_irq_wait(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_wait_irq(drm_device_t * dev, int irq_nr);
extern int i830_emit_irq(drm_device_t * dev);
extern irqreturn_t i830_driver_irq_handler( DRM_IRQ_ARGS );
extern void i830_driver_irq_preinstall( drm_device_t *dev );
extern void i830_driver_irq_postinstall( drm_device_t *dev );
extern void i830_driver_irq_uninstall( drm_device_t *dev );
extern void i830_driver_pretakedown(drm_device_t *dev);
extern void i830_driver_release(drm_device_t *dev, struct file *filp);
extern int i830_driver_dma_quiescent(drm_device_t *dev);
extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
extern void i830_driver_irq_preinstall(drm_device_t * dev);
extern void i830_driver_irq_postinstall(drm_device_t * dev);
extern void i830_driver_irq_uninstall(drm_device_t * dev);
extern void i830_driver_pretakedown(drm_device_t * dev);
extern void i830_driver_release(drm_device_t * dev, struct file *filp);
extern int i830_driver_dma_quiescent(drm_device_t * dev);
#define I830_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle)
@ -184,8 +184,6 @@ extern int i830_driver_dma_quiescent(drm_device_t *dev);
#define I830_READ16(reg) I830_DEREF16(reg)
#define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0)
#define I830_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
@ -203,7 +201,6 @@ extern int i830_driver_dma_quiescent(drm_device_t *dev);
virt = dev_priv->ring.virtual_start; \
} while (0)
#define OUT_RING(n) do { \
if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
@ -219,8 +216,7 @@ extern int i830_driver_dma_quiescent(drm_device_t *dev);
I830_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
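For reference, the intended call sequence for these ring macros is the one visible in i830_emit_irq() later in this patch; a minimal usage sketch:

	/* Usage pattern as in i830_emit_irq(): declare the ring locals,
	 * reserve room for n dwords, write them, then push the new tail
	 * out to LP_RING + RING_TAIL. */
	RING_LOCALS;

	BEGIN_LP_RING(2);
	OUT_RING(0);
	OUT_RING(GFX_OP_USER_INTERRUPT);
	ADVANCE_LP_RING();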
extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller);
extern int i830_wait_ring(drm_device_t * dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
@ -235,7 +231,6 @@ extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller);
#define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)
@ -248,7 +243,6 @@ extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller);
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
@ -332,4 +326,3 @@ extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller);
#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
#endif


@ -26,24 +26,24 @@
*
*/
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
irqreturn_t i830_driver_irq_handler( DRM_IRQ_ARGS )
irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *)arg;
drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
u16 temp;
drm_device_t *dev = (drm_device_t *) arg;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
u16 temp;
temp = I830_READ16(I830REG_INT_IDENTITY_R);
temp = I830_READ16(I830REG_INT_IDENTITY_R);
DRM_DEBUG("%x\n", temp);
if ( !( temp & 2 ) )
if (!(temp & 2))
return IRQ_NONE;
I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
@ -54,8 +54,7 @@ irqreturn_t i830_driver_irq_handler( DRM_IRQ_ARGS )
return IRQ_HANDLED;
}
int i830_emit_irq(drm_device_t *dev)
int i830_emit_irq(drm_device_t * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@ -64,27 +63,25 @@ int i830_emit_irq(drm_device_t *dev)
atomic_inc(&dev_priv->irq_emitted);
BEGIN_LP_RING(2);
OUT_RING( 0 );
OUT_RING( GFX_OP_USER_INTERRUPT );
ADVANCE_LP_RING();
BEGIN_LP_RING(2);
OUT_RING(0);
OUT_RING(GFX_OP_USER_INTERRUPT);
ADVANCE_LP_RING();
return atomic_read(&dev_priv->irq_emitted);
}
int i830_wait_irq(drm_device_t *dev, int irq_nr)
int i830_wait_irq(drm_device_t * dev, int irq_nr)
{
drm_i830_private_t *dev_priv =
(drm_i830_private_t *)dev->dev_private;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
DECLARE_WAITQUEUE(entry, current);
unsigned long end = jiffies + HZ*3;
unsigned long end = jiffies + HZ * 3;
int ret = 0;
DRM_DEBUG("%s\n", __FUNCTION__);
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
return 0;
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
return 0;
dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
@ -92,21 +89,21 @@ int i830_wait_irq(drm_device_t *dev, int irq_nr)
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
break;
if((signed)(end - jiffies) <= 0) {
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
break;
if ((signed)(end - jiffies) <= 0) {
DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
I830_READ16( I830REG_INT_IDENTITY_R ),
I830_READ16( I830REG_INT_MASK_R ),
I830_READ16( I830REG_INT_ENABLE_R ),
I830_READ16( I830REG_HWSTAM ));
I830_READ16(I830REG_INT_IDENTITY_R),
I830_READ16(I830REG_INT_MASK_R),
I830_READ16(I830REG_INT_ENABLE_R),
I830_READ16(I830REG_HWSTAM));
ret = -EBUSY; /* Lockup? Missed irq? */
ret = -EBUSY; /* Lockup? Missed irq? */
break;
}
schedule_timeout(HZ*3);
if (signal_pending(current)) {
ret = -EINTR;
schedule_timeout(HZ * 3);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
@ -116,92 +113,90 @@ int i830_wait_irq(drm_device_t *dev, int irq_nr)
return ret;
}
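Taken together, i830_emit_irq() and i830_wait_irq() form a sequence-number handshake: emit queues GFX_OP_USER_INTERRUPT and returns the running count of emitted interrupts, and wait sleeps until irq_received catches up or the three-second timeout hits. A minimal kernel-side sketch of the pairing, using only the helpers declared in i830_drv.h above:

/* Sketch only: not part of the patch, just the intended pairing. */
static int i830_sync_sketch(drm_device_t * dev)
{
	int seq = i830_emit_irq(dev);	/* returns dev_priv->irq_emitted */

	return i830_wait_irq(dev, seq);	/* 0, or -EBUSY / -EINTR on failure */
}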
/* Needs the lock as it touches the ring.
*/
int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg )
int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_emit_t emit;
int result;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("i830_irq_emit called without lock held\n");
return -EINVAL;
}
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return -EINVAL;
}
if (copy_from_user( &emit, (drm_i830_irq_emit_t __user *)arg, sizeof(emit) ))
if (copy_from_user
(&emit, (drm_i830_irq_emit_t __user *) arg, sizeof(emit)))
return -EFAULT;
result = i830_emit_irq( dev );
result = i830_emit_irq(dev);
if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) {
DRM_ERROR( "copy_to_user\n" );
if (copy_to_user(emit.irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
return 0;
}
/* Doesn't need the hardware lock.
*/
int i830_irq_wait( struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg )
int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_wait_t irqwait;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return -EINVAL;
}
if (copy_from_user( &irqwait, (drm_i830_irq_wait_t __user *)arg,
sizeof(irqwait) ))
if (copy_from_user(&irqwait, (drm_i830_irq_wait_t __user *) arg,
sizeof(irqwait)))
return -EFAULT;
return i830_wait_irq( dev, irqwait.irq_seq );
return i830_wait_irq(dev, irqwait.irq_seq);
}
/* drm_dma.h hooks
*/
void i830_driver_irq_preinstall( drm_device_t *dev ) {
drm_i830_private_t *dev_priv =
(drm_i830_private_t *)dev->dev_private;
void i830_driver_irq_preinstall(drm_device_t * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16( I830REG_HWSTAM, 0xffff );
I830_WRITE16( I830REG_INT_MASK_R, 0x0 );
I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 );
I830_WRITE16(I830REG_HWSTAM, 0xffff);
I830_WRITE16(I830REG_INT_MASK_R, 0x0);
I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
atomic_set(&dev_priv->irq_received, 0);
atomic_set(&dev_priv->irq_emitted, 0);
init_waitqueue_head(&dev_priv->irq_queue);
}
void i830_driver_irq_postinstall( drm_device_t *dev ) {
drm_i830_private_t *dev_priv =
(drm_i830_private_t *)dev->dev_private;
void i830_driver_irq_postinstall(drm_device_t * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16( I830REG_INT_ENABLE_R, 0x2 );
I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
}
void i830_driver_irq_uninstall( drm_device_t *dev ) {
drm_i830_private_t *dev_priv =
(drm_i830_private_t *)dev->dev_private;
void i830_driver_irq_uninstall(drm_device_t * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
if (!dev_priv)
return;
I830_WRITE16( I830REG_INT_MASK_R, 0xffff );
I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 );
I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
}


@ -15,7 +15,7 @@
#include "drm_pciids.h"
int postinit( struct drm_device *dev, unsigned long flags )
int postinit(struct drm_device *dev, unsigned long flags)
{
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@ -23,28 +23,26 @@ int postinit( struct drm_device *dev, unsigned long flags )
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -53,23 +51,24 @@ static struct pci_device_id pciidlist[] = {
};
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_I915_INIT)] = { i915_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_I915_FLUSH)] = { i915_flush_ioctl, 1, 0 },
[DRM_IOCTL_NR(DRM_I915_FLIP)] = { i915_flip_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = { i915_batchbuffer, 1, 0 },
[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = { i915_irq_emit, 1, 0 },
[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = { i915_irq_wait, 1, 0 },
[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = { i915_getparam, 1, 0 },
[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = { i915_setparam, 1, 1 },
[DRM_IOCTL_NR(DRM_I915_ALLOC)] = { i915_mem_alloc, 1, 0 },
[DRM_IOCTL_NR(DRM_I915_FREE)] = { i915_mem_free, 1, 0 },
[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = { i915_mem_init_heap, 1, 1 },
[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = { i915_cmdbuffer, 1, 0 }
[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, 1, 1},
[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, 1, 0},
[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, 1, 0},
[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, 1, 0},
[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, 1, 0},
[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, 1, 0},
[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, 1, 0},
[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, 1, 1},
[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, 1, 0},
[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, 1, 0},
[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, 1, 1},
[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, 1, 0}
};
static struct drm_driver_fn driver_fn = {
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.pretakedown = i915_driver_pretakedown,
.prerelease = i915_driver_prerelease,
.irq_preinstall = i915_driver_irq_preinstall,
@ -84,13 +83,13 @@ static struct drm_driver_fn driver_fn = {
.ioctls = ioctls,
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -99,10 +98,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init i915_init(void)
@ -118,6 +117,6 @@ static void __exit i915_exit(void)
module_init(i915_init);
module_exit(i915_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -35,30 +35,28 @@
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
static int postinit(struct drm_device *dev, unsigned long flags)
{
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -72,19 +70,21 @@ static struct pci_device_id pciidlist[] = {
*
*/
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_MACH64_INIT)] = { mach64_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_MACH64_CLEAR)] = { mach64_dma_clear, 1, 0 },
[DRM_IOCTL_NR(DRM_MACH64_SWAP)] = { mach64_dma_swap, 1, 0 },
[DRM_IOCTL_NR(DRM_MACH64_IDLE)] = { mach64_dma_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_MACH64_RESET)] = { mach64_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_MACH64_VERTEX)] = { mach64_dma_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_MACH64_BLIT)] = { mach64_dma_blit, 1, 0 },
[DRM_IOCTL_NR(DRM_MACH64_FLUSH)] = { mach64_dma_flush, 1, 0 },
[DRM_IOCTL_NR(DRM_MACH64_GETPARAM)] = { mach64_get_param, 1, 0 },
[DRM_IOCTL_NR(DRM_MACH64_INIT)] = {mach64_dma_init, 1, 1},
[DRM_IOCTL_NR(DRM_MACH64_CLEAR)] = {mach64_dma_clear, 1, 0},
[DRM_IOCTL_NR(DRM_MACH64_SWAP)] = {mach64_dma_swap, 1, 0},
[DRM_IOCTL_NR(DRM_MACH64_IDLE)] = {mach64_dma_idle, 1, 0},
[DRM_IOCTL_NR(DRM_MACH64_RESET)] = {mach64_engine_reset, 1, 0},
[DRM_IOCTL_NR(DRM_MACH64_VERTEX)] = {mach64_dma_vertex, 1, 0},
[DRM_IOCTL_NR(DRM_MACH64_BLIT)] = {mach64_dma_blit, 1, 0},
[DRM_IOCTL_NR(DRM_MACH64_FLUSH)] = {mach64_dma_flush, 1, 0},
[DRM_IOCTL_NR(DRM_MACH64_GETPARAM)] = {mach64_get_param, 1, 0},
};
static struct drm_driver_fn driver_fn = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA
| DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.pretakedown = mach64_driver_pretakedown,
.vblank_wait = mach64_driver_vblank_wait,
.irq_preinstall = mach64_driver_irq_preinstall,
@ -100,13 +100,13 @@ static struct drm_driver_fn driver_fn = {
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.dma_ioctl = mach64_dma_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -115,10 +115,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init mach64_init(void)
@ -134,6 +134,6 @@ static void __exit mach64_exit(void)
module_init(mach64_init);
module_exit(mach64_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -37,35 +37,33 @@
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
static int postinit(struct drm_device *dev, unsigned long flags)
{
dev->counters += 3;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -74,20 +72,23 @@ static struct pci_device_id pciidlist[] = {
};
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_MGA_INIT)] = { mga_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = { mga_dma_flush, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_RESET)] = { mga_dma_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_SWAP)] = { mga_dma_swap, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_CLEAR)] = { mga_dma_clear, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_INDICES)] = { mga_dma_indices, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_ILOAD)] = { mga_dma_iload, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_BLIT)] = { mga_dma_blit, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_GETPARAM)]= { mga_getparam, 1, 0 },
[DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
[DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
};
static struct drm_driver_fn driver_fn = {
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
.pretakedown = mga_driver_pretakedown,
.dma_quiescent = mga_driver_dma_quiescent,
.vblank_wait = mga_driver_vblank_wait,
@ -104,13 +105,13 @@ static struct drm_driver_fn driver_fn = {
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.dma_ioctl = mga_dma_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -119,10 +120,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init mga_init(void)
@ -138,6 +139,6 @@ static void __exit mga_exit(void)
module_init(mga_init);
module_exit(mga_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -38,30 +38,28 @@
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
static int postinit(struct drm_device *dev, unsigned long flags)
{
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -76,27 +74,30 @@ static struct pci_device_id pciidlist[] = {
* 2.5 - Add FLIP ioctl, disable FULLSCREEN.
*/
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_R128_INIT)] = { r128_cce_init, 1, 1 },
[DRM_IOCTL_NR(DRM_R128_CCE_START)] = { r128_cce_start, 1, 1 },
[DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 },
[DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 },
[DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_RESET)] = { r128_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = { r128_fullscreen, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_SWAP)] = { r128_cce_swap, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_FLIP)] = { r128_cce_flip, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_CLEAR)] = { r128_cce_clear, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_VERTEX)] = { r128_cce_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_INDICES)] = { r128_cce_indices, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_BLIT)] = { r128_cce_blit, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_DEPTH)] = { r128_cce_depth, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 },
[DRM_IOCTL_NR(DRM_R128_GETPARAM)] = { r128_getparam, 1, 0 },
[DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, 1, 1},
[DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, 1, 1},
[DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, 1, 1},
[DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, 1, 1},
[DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, 1, 0},
[DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, 1, 0},
[DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, 1, 0},
[DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, 1, 0},
[DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, 1, 0},
[DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, 1, 0},
[DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, 1, 0},
[DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, 1, 0},
[DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, 1, 0},
[DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, 1, 0},
[DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, 1, 0},
[DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, 1, 1},
[DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, 1, 0},
};
static struct drm_driver_fn driver_fn = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
.prerelease = r128_driver_prerelease,
.pretakedown = r128_driver_pretakedown,
@ -114,13 +115,14 @@ static struct drm_driver_fn driver_fn = {
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.dma_ioctl = r128_cce_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
}
,
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -129,10 +131,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init r128_init(void)
@ -148,6 +150,6 @@ static void __exit r128_exit(void)
module_init(r128_init);
module_exit(r128_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -29,7 +29,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
@ -39,30 +38,28 @@
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
static int postinit(struct drm_device *dev, unsigned long flags)
{
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -104,35 +101,38 @@ static struct pci_device_id pciidlist[] = {
* and GL_EXT_blend_[func|equation]_separate on r200
*/
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 },
[DRM_IOCTL_NR(DRM_RADEON_CP_START)] = { radeon_cp_start, 1, 1 },
[DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 },
[DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 },
[DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = { radeon_cp_resume, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_RESET)] = { radeon_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 },
[DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = { radeon_cp_vertex2, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = { radeon_cp_cmdbuf, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = { radeon_cp_getparam, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_FLIP)] = { radeon_cp_flip, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = { radeon_mem_alloc, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_FREE)] = { radeon_mem_free, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = { radeon_mem_init_heap,1, 1 },
[DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = { radeon_cp_setparam, 1, 0 },
[DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, 1, 1},
[DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, 1, 1},
[DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, 1, 1},
[DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, 1, 1},
[DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, 1, 1},
[DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, 1, 1},
[DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, 1, 0},
};
static struct drm_driver_fn driver_fn = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.preinit = radeon_preinit,
.postinit = radeon_postinit,
@ -155,13 +155,14 @@ static struct drm_driver_fn driver_fn = {
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.dma_ioctl = radeon_cp_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
}
,
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -170,10 +171,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init radeon_init(void)
@ -189,6 +190,6 @@ static void __exit radeon_exit(void)
module_init(radeon_init);
module_exit(radeon_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -31,7 +31,7 @@
#include "radeon_drm.h"
#include "radeon_drv.h"
static void gpio_setscl(void* data, int state)
static void gpio_setscl(void *data, int state)
{
struct radeon_i2c_chan *chan = data;
drm_radeon_private_t *dev_priv = chan->dev->dev_private;
@ -45,7 +45,7 @@ static void gpio_setscl(void* data, int state)
(void)RADEON_READ(chan->ddc_reg);
}
static void gpio_setsda(void* data, int state)
static void gpio_setsda(void *data, int state)
{
struct radeon_i2c_chan *chan = data;
drm_radeon_private_t *dev_priv = chan->dev->dev_private;
@ -59,7 +59,7 @@ static void gpio_setsda(void* data, int state)
(void)RADEON_READ(chan->ddc_reg);
}
static int gpio_getscl(void* data)
static int gpio_getscl(void *data)
{
struct radeon_i2c_chan *chan = data;
drm_radeon_private_t *dev_priv = chan->dev->dev_private;
@ -70,7 +70,7 @@ static int gpio_getscl(void* data)
return (val & VGA_DDC_CLK_INPUT) ? 1 : 0;
}
static int gpio_getsda(void* data)
static int gpio_getsda(void *data)
{
struct radeon_i2c_chan *chan = data;
drm_radeon_private_t *dev_priv = chan->dev->dev_private;
@ -86,17 +86,17 @@ static int setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
int rc;
strcpy(chan->adapter.name, name);
chan->adapter.owner = THIS_MODULE;
chan->adapter.id = I2C_ALGO_ATI;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = &chan->dev->pdev->dev;
chan->algo.setsda = gpio_setsda;
chan->algo.setscl = gpio_setscl;
chan->algo.getsda = gpio_getsda;
chan->algo.getscl = gpio_getscl;
chan->algo.udelay = 40;
chan->algo.timeout = 20;
chan->algo.data = chan;
chan->adapter.owner = THIS_MODULE;
chan->adapter.id = I2C_ALGO_ATI;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = &chan->dev->pdev->dev;
chan->algo.setsda = gpio_setsda;
chan->algo.setscl = gpio_setscl;
chan->algo.getsda = gpio_getsda;
chan->algo.getscl = gpio_getscl;
chan->algo.udelay = 40;
chan->algo.timeout = 20;
chan->algo.data = chan;
i2c_set_adapdata(&chan->adapter, chan);
@ -113,7 +113,7 @@ static int setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
return rc;
}
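setup_i2c_bus() plugs the four GPIO callbacks into an i2c_algo_bit_data so the generic bit-banging layer can drive DDC through the Radeon GPIO register. A simplified, hypothetical sketch of how that layer clocks one data bit out through the registered callbacks (the real algorithm, with START/STOP and ACK handling, lives in the i2c-algo-bit module):

/* Illustration only; uses the callbacks and udelay value set above. */
static void sketch_send_bit(struct i2c_algo_bit_data *algo, int bit)
{
	algo->setsda(algo->data, bit);	/* gpio_setsda(): drive VGA_DDC_DATA */
	udelay(algo->udelay);		/* 40us, per the setup above */
	algo->setscl(algo->data, 1);	/* release SCL; slave samples SDA */
	udelay(algo->udelay);
	algo->setscl(algo->data, 0);	/* pull SCL low before the next bit */
}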
int radeon_create_i2c_busses(drm_device_t *dev)
int radeon_create_i2c_busses(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int ret;
@ -140,7 +140,7 @@ int radeon_create_i2c_busses(drm_device_t *dev)
return 0;
}
void radeon_delete_i2c_busses(drm_device_t *dev)
void radeon_delete_i2c_busses(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int i, ret;


@ -17,13 +17,11 @@
#include <linux/i2c-algo-bit.h>
struct radeon_i2c_chan {
drm_device_t *dev;
u32 ddc_reg;
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
drm_device_t *dev;
u32 ddc_reg;
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
};
extern int radeon_create_i2c_busses(drm_device_t *dev);
extern void radeon_delete_i2c_busses(drm_device_t *dev);
extern int radeon_create_i2c_busses(drm_device_t * dev);
extern void radeon_delete_i2c_busses(drm_device_t * dev);


@ -22,59 +22,58 @@
* DEALINGS IN THE SOFTWARE.
*/
/*=========================================================*/
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#define SAVAGE_DEFAULT_USEC_TIMEOUT 10000
#define SAVAGE_FREELIST_DEBUG 0
int savage_preinit( drm_device_t *dev, unsigned long chipset )
int savage_preinit(drm_device_t * dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
unsigned mmioBase, fbBase, fbSize, apertureBase;
int ret = 0;
dev_priv = drm_alloc( sizeof(drm_savage_private_t), DRM_MEM_DRIVER );
if ( dev_priv == NULL )
dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return DRM_ERR(ENOMEM);
memset( dev_priv, 0, sizeof(drm_savage_private_t) );
memset(dev_priv, 0, sizeof(drm_savage_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = (enum savage_family)chipset;
if( S3_SAVAGE3D_SERIES(dev_priv->chipset) ) {
fbBase = pci_resource_start( dev->pdev, 0 );
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
fbBase = pci_resource_start(dev->pdev, 0);
fbSize = SAVAGE_FB_SIZE_S3;
mmioBase = fbBase + fbSize;
apertureBase = fbBase + SAVAGE_APERTURE_OFFSET;
} else if( chipset != S3_SUPERSAVAGE ) {
mmioBase = pci_resource_start( dev->pdev, 0 );
fbBase = pci_resource_start( dev->pdev, 1 );
} else if (chipset != S3_SUPERSAVAGE) {
mmioBase = pci_resource_start(dev->pdev, 0);
fbBase = pci_resource_start(dev->pdev, 1);
fbSize = SAVAGE_FB_SIZE_S4;
apertureBase = fbBase + SAVAGE_APERTURE_OFFSET;
} else {
mmioBase = pci_resource_start( dev->pdev, 0 );
fbBase = pci_resource_start( dev->pdev, 1 );
fbSize = pci_resource_len( dev->pdev, 1 );
apertureBase = pci_resource_start( dev->pdev, 2 );
mmioBase = pci_resource_start(dev->pdev, 0);
fbBase = pci_resource_start(dev->pdev, 1);
fbSize = pci_resource_len(dev->pdev, 1);
apertureBase = pci_resource_start(dev->pdev, 2);
}
if( (ret = drm_initmap( dev, mmioBase, SAVAGE_MMIO_SIZE,
_DRM_REGISTERS, 0 )))
if ((ret = drm_initmap(dev, mmioBase, SAVAGE_MMIO_SIZE,
_DRM_REGISTERS, 0)))
return ret;
if( (ret = drm_initmap( dev, fbBase, fbSize,
_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING )))
if ((ret = drm_initmap(dev, fbBase, fbSize,
_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING)))
return ret;
if( (ret = drm_initmap( dev, apertureBase, SAVAGE_APERTURE_SIZE,
_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING )))
if ((ret = drm_initmap(dev, apertureBase, SAVAGE_APERTURE_SIZE,
_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING)))
return ret;
return ret;


@ -34,22 +34,20 @@
#define DRM_SAVAGE_MEM_LOCATION_AGP 2
#define DRM_SAVAGE_DMA_AGP_SIZE (16*1024*1024)
typedef struct drm_savage_alloc_cont_mem
{
size_t size; /*size of buffer*/
unsigned long type; /*4k page or word*/
unsigned long alignment;
unsigned long location; /*agp or pci*/
typedef struct drm_savage_alloc_cont_mem {
size_t size; /*size of buffer */
unsigned long type; /*4k page or word */
unsigned long alignment;
unsigned long location; /*agp or pci */
unsigned long phyaddress;
unsigned long linear;
unsigned long phyaddress;
unsigned long linear;
} drm_savage_alloc_cont_mem_t;
typedef struct drm_savage_get_physcis_address
{
unsigned long v_address;
unsigned long p_address;
}drm_savage_get_physcis_address_t;
typedef struct drm_savage_get_physcis_address {
unsigned long v_address;
unsigned long p_address;
} drm_savage_get_physcis_address_t;
/*ioctl number*/
#define DRM_IOCTL_SAVAGE_ALLOC_CONTINUOUS_MEM \
@ -69,12 +67,12 @@ typedef struct drm_savage_get_physcis_address
#define SAVAGE_UPLOAD_CTX 0x1
#define SAVAGE_UPLOAD_TEX0 0x2
#define SAVAGE_UPLOAD_TEX1 0x4
#define SAVAGE_UPLOAD_PIPE 0x8 /* <- seems should be removed, Jiayo Hsu */
#define SAVAGE_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
#define SAVAGE_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define SAVAGE_UPLOAD_PIPE 0x8 /* <- seems should be removed, Jiayo Hsu */
#define SAVAGE_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
#define SAVAGE_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define SAVAGE_UPLOAD_2D 0x40
#define SAVAGE_WAIT_AGE 0x80 /* handled client-side */
#define SAVAGE_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#define SAVAGE_WAIT_AGE 0x80 /* handled client-side */
#define SAVAGE_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
/*frank:add Buffer state 2001/11/15*/
#define SAVAGE_UPLOAD_BUFFERS 0x200
/* original marked off in MGA drivers , Jiayo Hsu Oct.23,2001 */
@ -93,10 +91,10 @@ typedef struct drm_savage_get_physcis_address
#define SAVAGE_CARD_HEAP 0
#define SAVAGE_AGP_HEAP 1
#define SAVAGE_NR_TEX_HEAPS 2
#define SAVAGE_NR_TEX_REGIONS 16 /* num. of global texture manage list element*/
#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16 /* each region 64K, Jiayo Hsu */
#define SAVAGE_NR_TEX_REGIONS 16 /* num. of global texture manage list element */
#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16 /* each region 64K, Jiayo Hsu */
#endif /* __SAVAGE_SAREA_DEFINES__ */
#endif /* __SAVAGE_SAREA_DEFINES__ */
/* drm_tex_region_t define in drm.h */
@ -108,22 +106,21 @@ typedef struct {
unsigned int pitch;
} drm_savage_server_regs_t;
typedef struct _drm_savage_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
unsigned int setup[28]; /* 3D context registers */
drm_savage_server_regs_t server_state;
unsigned int setup[28]; /* 3D context registers */
drm_savage_server_regs_t server_state;
unsigned int dirty;
unsigned int dirty;
unsigned int vertsize; /* vertext size */
unsigned int vertsize; /* vertext size */
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[SAVAGE_NR_SAREA_CLIPRECTS];
unsigned int nbox;
drm_clip_rect_t boxes[SAVAGE_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Information about the most recently used 3d drawable. The
* client fills in the req_* fields, the server fills in the
@ -132,58 +129,56 @@ typedef struct _drm_savage_sarea {
* The client clears the exported_drawable field before
* clobbering the boxes data.
*/
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* SAVAGE_FRONT or SAVAGE_BACK */
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* SAVAGE_FRONT or SAVAGE_BACK */
unsigned int exported_drawable;
unsigned int exported_drawable;
unsigned int exported_index;
unsigned int exported_stamp;
unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
unsigned int exported_stamp;
unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
drm_clip_rect_t exported_boxes[SAVAGE_NR_SAREA_CLIPRECTS];
drm_clip_rect_t exported_boxes[SAVAGE_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
unsigned int status[4];
/* LRU lists for texture memory in agp space and on the card.
*/
drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
1];
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
unsigned long shadow_status[64];/*too big?*/
int ctxOwner;
unsigned long shadow_status[64]; /*too big? */
/*agp offset*/
/*agp offset */
unsigned long agp_offset;
} drm_savage_sarea_t,*drm_savage_sarea_ptr;
} drm_savage_sarea_t, *drm_savage_sarea_ptr;
typedef struct drm_savage_init {
unsigned long sarea_priv_offset;
unsigned long sarea_priv_offset;
int chipset;
int sgram;
int sgram;
unsigned int maccess;
unsigned int fb_cpp;
unsigned int fb_cpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_cpp;
unsigned int depth_offset, depth_pitch;
unsigned int depth_cpp;
unsigned int depth_offset, depth_pitch;
unsigned int texture_offset[SAVAGE_NR_TEX_HEAPS];
unsigned int texture_size[SAVAGE_NR_TEX_HEAPS];
unsigned int texture_offset[SAVAGE_NR_TEX_HEAPS];
unsigned int texture_size[SAVAGE_NR_TEX_HEAPS];
unsigned long fb_offset;
unsigned long mmio_offset;
@ -192,7 +187,7 @@ typedef struct drm_savage_init {
typedef struct drm_savage_fullscreen {
enum {
SAVAGE_INIT_FULLSCREEN = 0x01,
SAVAGE_INIT_FULLSCREEN = 0x01,
SAVAGE_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_savage_fullscreen_t;
@ -206,16 +201,16 @@ typedef struct drm_savage_clear {
} drm_savage_clear_t;
typedef struct drm_savage_vertex {
int idx; /* buffer to queue */
int used; /* bytes in use */
int discard; /* client finished with buffer? */
int idx; /* buffer to queue */
int used; /* bytes in use */
int discard; /* client finished with buffer? */
} drm_savage_vertex_t;
typedef struct drm_savage_indices {
int idx; /* buffer to queue */
int idx; /* buffer to queue */
unsigned int start;
unsigned int end;
int discard; /* client finished with buffer? */
int discard; /* client finished with buffer? */
} drm_savage_indices_t;
typedef struct drm_savage_iload {
@ -231,7 +226,7 @@ typedef struct _drm_savage_blit {
int src_pitch, dst_pitch;
int delta_sx, delta_sy;
int delta_dx, delta_dy;
int height, ydir; /* flip image vertically */
int height, ydir; /* flip image vertically */
int source_pitch, dest_pitch;
} drm_savage_blit_t;


@ -26,19 +26,19 @@
/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
enum savage_family {
S3_UNKNOWN = 0,
S3_SAVAGE3D,
S3_SAVAGE_MX,
S3_SAVAGE4,
S3_PROSAVAGE,
S3_TWISTER,
S3_PROSAVAGEDDR,
S3_SUPERSAVAGE,
S3_SAVAGE2000,
S3_LAST
S3_UNKNOWN = 0,
S3_SAVAGE3D,
S3_SAVAGE_MX,
S3_SAVAGE4,
S3_PROSAVAGE,
S3_TWISTER,
S3_PROSAVAGEDDR,
S3_SUPERSAVAGE,
S3_SAVAGE2000,
S3_LAST
};
extern int savage_preinit( drm_device_t *dev, unsigned long chipset );
extern int savage_preinit(drm_device_t * dev, unsigned long chipset);
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
@ -65,10 +65,10 @@ typedef struct drm_savage_private {
} drm_savage_private_t;
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
#endif /* end #ifndef __SAVAGE_DRV_ */
#endif /* end #ifndef __SAVAGE_DRV_ */


@ -32,30 +32,28 @@
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
static int postinit(struct drm_device *dev, unsigned long flags)
{
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -64,12 +62,12 @@ static struct pci_device_id pciidlist[] = {
};
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = { sis_fb_alloc, 1, 0 },
[DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = { sis_fb_free, 1, 0 },
[DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = { sis_ioctl_agp_init, 1, 1 },
[DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = { sis_ioctl_agp_alloc, 1, 0 },
[DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = { sis_ioctl_agp_free, 1, 0 },
[DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = { sis_fb_init, 1, 1 }
[DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, 1, 0},
[DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, 1, 0},
[DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, 1, 1},
[DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, 1, 0},
[DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, 1, 0},
[DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, 1, 1}
};
static struct drm_driver_fn driver_fn = {
@ -84,13 +82,13 @@ static struct drm_driver_fn driver_fn = {
.ioctls = ioctls,
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -99,10 +97,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init sis_init(void)
@ -118,6 +116,6 @@ static void __exit sis_exit(void)
module_init(sis_init);
module_exit(sis_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -36,30 +36,28 @@
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
static int postinit(struct drm_device *dev, unsigned long flags)
{
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -75,13 +73,13 @@ static struct drm_driver_fn driver_fn = {
.postinit = postinit,
.version = version,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -90,10 +88,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init tdfx_init(void)
@ -109,6 +107,6 @@ static void __exit tdfx_exit(void)
module_init(tdfx_init);
module_exit(tdfx_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -79,7 +79,7 @@
#define ioctl(a,b,c) xf86ioctl(a,b,c)
#else
#include <sys/ioccom.h>
#endif /* __FreeBSD__ && xf86ioctl */
#endif /* __FreeBSD__ && xf86ioctl */
#define DRM_IOCTL_NR(n) ((n) & 0xff)
#define DRM_IOC_VOID IOC_VOID
#define DRM_IOC_READ IOC_OUT
@ -125,12 +125,10 @@
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
typedef unsigned long drm_handle_t; /**< To mapped regions */
typedef unsigned int drm_context_t; /**< GLXContext handle */
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t; /**< Magic for authentication */
typedef unsigned long drm_handle_t; /**< To mapped regions */
typedef unsigned int drm_context_t; /**< GLXContext handle */
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t; /**< Magic for authentication */
/**
* Cliprect.
@ -142,22 +140,21 @@ typedef unsigned int drm_magic_t; /**< Magic for authentication */
* backwards-compatibility reasons.
*/
typedef struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
} drm_clip_rect_t;
/**
* Texture region,
*/
typedef struct drm_tex_region {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
} drm_tex_region_t;
/**
@ -169,10 +166,9 @@ typedef struct drm_tex_region {
*/
typedef struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
char padding[60]; /**< Pad to cache line */
} drm_hw_lock_t;
/* This is beyond ugly, and only works on GCC. However, it allows me to use
* drm.h in places (i.e., in the X-server) where I can't use size_t. The real
* fix is to use uint32_t instead of size_t, but that fix will break existing
@ -195,18 +191,17 @@ typedef struct drm_hw_lock {
* \sa drmGetVersion().
*/
typedef struct drm_version {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel;/**< Patch level */
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
DRM_SIZE_T name_len; /**< Length of name buffer */
char __user *name; /**< Name of driver */
char __user *name; /**< Name of driver */
DRM_SIZE_T date_len; /**< Length of date buffer */
char __user *date; /**< User-space buffer to hold date */
char __user *date; /**< User-space buffer to hold date */
DRM_SIZE_T desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
char __user *desc; /**< User-space buffer to hold desc */
} drm_version_t;
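For illustration, a minimal user-space sketch of the usual two-pass query against this structure: the ioctl is issued once with zeroed lengths to learn the string sizes, then again with buffers of those sizes. Here fd is an already-open DRM file descriptor, query_version() is a hypothetical helper, and DRM_IOCTL_VERSION is assumed to be the request number defined with the other ioctls further down in this header.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Fill *v with the driver's name/date/desc strings; the caller frees them. */
static int query_version(int fd, drm_version_t *v)
{
        memset(v, 0, sizeof(*v));
        if (ioctl(fd, DRM_IOCTL_VERSION, v))    /* first pass: lengths only */
                return -1;
        v->name = malloc(v->name_len + 1);
        v->date = malloc(v->date_len + 1);
        v->desc = malloc(v->desc_len + 1);
        if (!v->name || !v->date || !v->desc)
                return -1;
        if (ioctl(fd, DRM_IOCTL_VERSION, v))    /* second pass: copy the strings */
                return -1;
        v->name[v->name_len] = v->date[v->date_len] = v->desc[v->desc_len] = '\0';
        return 0;
}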
/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
@ -214,22 +209,20 @@ typedef struct drm_version {
*/
typedef struct drm_unique {
DRM_SIZE_T unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
char __user *unique; /**< Unique name for driver instantiation */
} drm_unique_t;
#undef DRM_SIZE_T
typedef struct drm_list {
int count; /**< Length of user-space structures */
drm_version_t __user *version;
int count; /**< Length of user-space structures */
drm_version_t __user *version;
} drm_list_t;
typedef struct drm_block {
int unused;
int unused;
} drm_block_t;
/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
@ -241,43 +234,39 @@ typedef struct drm_control {
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
} func;
int irq;
} drm_control_t;
/**
* Type of memory to map.
*/
typedef enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
_DRM_REGISTERS = 1, /**< no caching, no core dump */
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
_DRM_REGISTERS = 1, /**< no caching, no core dump */
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4 /**< Scatter/gather memory for PCI DMA */
} drm_map_type_t;
/**
* Memory mapping flags.
*/
typedef enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /**< shared, cached, locked */
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /**< shared, cached, locked */
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /**< Removable mapping */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /**< Removable mapping */
} drm_map_flags_t;
typedef struct drm_ctx_priv_map {
unsigned int ctx_id; /**< Context requesting private mapping */
void *handle; /**< Handle of map */
unsigned int ctx_id; /**< Context requesting private mapping */
void *handle; /**< Handle of map */
} drm_ctx_priv_map_t;
/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
@ -285,30 +274,28 @@ typedef struct drm_ctx_priv_map {
* \sa drmAddMap().
*/
typedef struct drm_map {
unsigned long offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
drm_map_type_t type; /**< Type of memory to map */
unsigned long offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
drm_map_type_t type; /**< Type of memory to map */
drm_map_flags_t flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
/* Private data */
int mtrr; /**< MTRR slot used */
/* Private data */
} drm_map_t;
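For illustration, a hedged sketch of how this structure is typically driven from user space (roughly what drmAddMap() does): the caller fills in offset, size, type and flags, the kernel hands back handle, and that handle is then used as the mmap() offset. fb_base and fb_size are placeholder values, DRM_IOCTL_ADD_MAP is assumed to be defined with the other ioctl numbers further down in this header, and adding maps normally requires a privileged (master) descriptor.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "drm.h"

/* Register a frame-buffer mapping and mmap it into this process. */
static void *map_framebuffer(int fd, unsigned long fb_base, unsigned long fb_size)
{
        drm_map_t map;
        void *ptr;

        memset(&map, 0, sizeof(map));
        map.offset = fb_base;           /* physical address of the region */
        map.size = fb_size;
        map.type = _DRM_FRAME_BUFFER;
        map.flags = 0;
        if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map))
                return NULL;
        /* map.handle is the token user space passes as the mmap offset */
        ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, (off_t)(unsigned long)map.handle);
        return ptr == MAP_FAILED ? NULL : ptr;
}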
/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
typedef struct drm_client {
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
unsigned long pid; /**< Process ID */
unsigned long uid; /**< User ID */
unsigned long magic; /**< Magic */
unsigned long iocs; /**< Ioctl count */
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
unsigned long pid; /**< Process ID */
unsigned long uid; /**< User ID */
unsigned long magic; /**< Magic */
unsigned long iocs; /**< Ioctl count */
} drm_client_t;
typedef enum {
_DRM_STAT_LOCK,
_DRM_STAT_OPENS,
@ -326,50 +313,45 @@ typedef enum {
_DRM_STAT_DMA, /**< DMA */
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
_DRM_STAT_MISSED /**< Missed DMA opportunity */
/* Add to the *END* of the list */
/* Add to the *END* of the list */
} drm_stat_type_t;
/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
typedef struct drm_stats {
unsigned long count;
struct {
unsigned long value;
unsigned long value;
drm_stat_type_t type;
} data[15];
} drm_stats_t;
/**
* Hardware locking flags.
*/
typedef enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
} drm_lock_flags_t;
/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
*/
typedef struct drm_lock {
int context;
int context;
drm_lock_flags_t flags;
} drm_lock_t;
/**
* DMA flags
*
@ -379,8 +361,8 @@ typedef struct drm_lock {
* \sa drm_dma.
*/
typedef enum drm_dma_flags {
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /**<
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
*
* \note The buffer may not yet have
@ -391,78 +373,72 @@ typedef enum drm_dma_flags {
* processed.
*/
_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
} drm_dma_flags_t;
/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
*/
typedef struct drm_buf_desc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
enum {
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
_DRM_SG_BUFFER = 0x04 /**< Scatter/gather memory buffer */
} flags;
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
_DRM_SG_BUFFER = 0x04 /**< Scatter/gather memory buffer */
} flags;
unsigned long agp_start; /**<
* Start address of where the AGP buffers are
* in the AGP aperture
*/
} drm_buf_desc_t;
/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
typedef struct drm_buf_info {
int count; /**< Number of buffers described in list */
int count; /**< Number of buffers described in list */
drm_buf_desc_t __user *list; /**< List of buffer descriptions */
} drm_buf_info_t;
/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
typedef struct drm_buf_free {
int count;
int __user *list;
int count;
int __user *list;
} drm_buf_free_t;
/**
* Buffer information
*
* \sa drm_buf_map.
*/
typedef struct drm_buf_pub {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
void __user *address; /**< Address of buffer */
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
void __user *address; /**< Address of buffer */
} drm_buf_pub_t;
/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
typedef struct drm_buf_map {
int count; /**< Length of the buffer list */
void __user *virtual; /**< Mmap'd area in user-virtual */
int count; /**< Length of the buffer list */
void __user *virtual; /**< Mmap'd area in user-virtual */
drm_buf_pub_t __user *list; /**< Buffer information */
} drm_buf_map_t;
/**
* DRM_IOCTL_DMA ioctl argument type.
*
@ -471,61 +447,55 @@ typedef struct drm_buf_map {
* \sa drmDMA().
*/
typedef struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int __user *send_indices; /**< List of handles to buffers */
int __user *send_sizes; /**< Lengths of data to send */
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int __user *send_indices; /**< List of handles to buffers */
int __user *send_sizes; /**< Lengths of data to send */
drm_dma_flags_t flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int __user *request_indices; /**< Buffer information */
int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int __user *request_indices; /**< Buffer information */
int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
} drm_dma_t;
typedef enum {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
_DRM_CONTEXT_2DONLY = 0x02
} drm_ctx_flags_t;
/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
*/
typedef struct drm_ctx {
drm_context_t handle;
drm_context_t handle;
drm_ctx_flags_t flags;
} drm_ctx_t;
/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
typedef struct drm_ctx_res {
int count;
drm_ctx_t __user *contexts;
int count;
drm_ctx_t __user *contexts;
} drm_ctx_res_t;
/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
typedef struct drm_draw {
drm_drawable_t handle;
drm_drawable_t handle;
} drm_draw_t;
/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
typedef struct drm_auth {
drm_magic_t magic;
drm_magic_t magic;
} drm_auth_t;
/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
@ -538,24 +508,20 @@ typedef struct drm_irq_busid {
int funcnum; /**< function number */
} drm_irq_busid_t;
typedef enum {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
} drm_vblank_seq_type_t;
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
struct drm_wait_vblank_request {
drm_vblank_seq_type_t type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
drm_vblank_seq_type_t type;
unsigned int sequence;
@ -563,7 +529,6 @@ struct drm_wait_vblank_reply {
long tval_usec;
};
/**
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
@ -574,7 +539,6 @@ typedef union drm_wait_vblank {
struct drm_wait_vblank_reply reply;
} drm_wait_vblank_t;
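For illustration, a minimal sketch of the usual way this union is used: fill in the request side, issue the ioctl, then read the reply side back out of the same memory. fd is an already-open DRM file descriptor, wait_one_vblank() is a hypothetical helper, and DRM_IOCTL_WAIT_VBLANK is assumed to be the request number defined further down in this header.

#include <sys/ioctl.h>
#include "drm.h"

/* Block until one more vertical blank has passed; returns the new count. */
static int wait_one_vblank(int fd)
{
        drm_wait_vblank_t vbl;

        vbl.request.type = _DRM_VBLANK_RELATIVE;        /* relative to the current count */
        vbl.request.sequence = 1;                       /* ...so wait for one more vblank */
        vbl.request.signal = 0;                         /* block rather than take a signal */
        if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl))
                return -1;
        return (int)vbl.reply.sequence;                 /* sequence number we woke up at */
}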
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
@ -584,7 +548,6 @@ typedef struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
} drm_agp_mode_t;
/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
@ -593,22 +556,20 @@ typedef struct drm_agp_mode {
typedef struct drm_agp_buffer {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for binding / unbinding */
unsigned long type; /**< Type of memory to allocate */
unsigned long physical; /**< Physical used by i810 */
unsigned long type; /**< Type of memory to allocate */
unsigned long physical; /**< Physical used by i810 */
} drm_agp_buffer_t;
/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
*/
typedef struct drm_agp_binding {
unsigned long handle; /**< From drm_agp_buffer */
unsigned long handle; /**< From drm_agp_buffer */
unsigned long offset; /**< In bytes -- will round to page boundary */
} drm_agp_binding_t;
/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
@ -617,22 +578,21 @@ typedef struct drm_agp_binding {
* drmAgpVendorId() and drmAgpDeviceId().
*/
typedef struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /**< physical address */
unsigned long aperture_size; /**< bytes */
unsigned long memory_allowed; /**< bytes */
unsigned long memory_used;
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /**< physical address */
unsigned long aperture_size; /**< bytes */
unsigned long memory_allowed; /**< bytes */
unsigned long memory_used;
/** \name PCI information */
/*@{*/
/*@{ */
unsigned short id_vendor;
unsigned short id_device;
/*@}*/
/*@} */
} drm_agp_info_t;
/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
@ -651,7 +611,6 @@ typedef struct drm_set_version {
int drm_dd_minor;
} drm_set_version_t;
/**
* \name Ioctls Definitions
*/
@ -719,7 +678,6 @@ typedef struct drm_set_version {
/*@}*/
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x79.


@ -38,7 +38,7 @@
#if defined(__alpha__)
#define SAREA_MAX 0x2000
#elif defined(__ia64__)
#define SAREA_MAX 0x10000 /* 64kB */
#define SAREA_MAX 0x10000 /* 64kB */
#else
/* Intel 830M driver needs at least 8k SAREA */
#define SAREA_MAX 0x2000
@ -51,28 +51,28 @@
/** SAREA drawable */
typedef struct drm_sarea_drawable {
unsigned int stamp;
unsigned int flags;
unsigned int stamp;
unsigned int flags;
} drm_sarea_drawable_t;
/** SAREA frame */
typedef struct drm_sarea_frame {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
} drm_sarea_frame_t;
/** SAREA */
typedef struct drm_sarea {
/** first thing is always the DRM locking structure */
drm_hw_lock_t lock;
drm_hw_lock_t lock;
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
drm_hw_lock_t drawable_lock;
drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
drm_sarea_frame_t frame; /**< frame */
drm_context_t dummy_context;
drm_hw_lock_t drawable_lock;
drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
drm_sarea_frame_t frame; /**< frame */
drm_context_t dummy_context;
} drm_sarea_t;
#endif /* _DRM_SAREA_H_ */
#endif /* _DRM_SAREA_H_ */


@ -83,7 +83,7 @@ int i915_dma_cleanup(drm_device_t * dev)
* is freed, it's too late.
*/
if (dev->irq)
drm_irq_uninstall (dev);
drm_irq_uninstall(dev);
if (dev->dev_private) {
drm_i915_private_t *dev_priv =
@ -108,8 +108,8 @@ int i915_dma_cleanup(drm_device_t * dev)
I915_WRITE(0x02080, 0x1ffff000);
}
drm_free (dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
dev->dev_private = NULL;
}
@ -254,8 +254,8 @@ int i915_dma_init(DRM_IOCTL_ARGS)
switch (init.func) {
case I915_INIT_DMA:
dev_priv = drm_alloc (sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
dev_priv = drm_alloc(sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
if (dev_priv == NULL)
return DRM_ERR(ENOMEM);
retcode = i915_initialize(dev, dev_priv, &init);
@ -732,7 +732,7 @@ int i915_setparam(DRM_IOCTL_ARGS)
return 0;
}
void i915_driver_pretakedown(drm_device_t *dev)
void i915_driver_pretakedown(drm_device_t * dev)
{
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;
@ -741,7 +741,7 @@ void i915_driver_pretakedown(drm_device_t *dev)
i915_dma_cleanup(dev);
}
void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp)
void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;


@ -88,8 +88,8 @@ extern int i915_getparam(DRM_IOCTL_ARGS);
extern int i915_setparam(DRM_IOCTL_ARGS);
extern int i915_cmdbuffer(DRM_IOCTL_ARGS);
extern void i915_kernel_lost_context(drm_device_t * dev);
extern void i915_driver_pretakedown(drm_device_t *dev);
extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp);
extern void i915_driver_pretakedown(drm_device_t * dev);
extern void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp);
/* i915_irq.c */
extern int i915_irq_emit(DRM_IOCTL_ARGS);
@ -98,9 +98,9 @@ extern int i915_wait_irq(drm_device_t * dev, int irq_nr);
extern int i915_emit_irq(drm_device_t * dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(drm_device_t *dev);
extern void i915_driver_irq_postinstall(drm_device_t *dev);
extern void i915_driver_irq_uninstall(drm_device_t *dev);
extern void i915_driver_irq_preinstall(drm_device_t * dev);
extern void i915_driver_irq_postinstall(drm_device_t * dev);
extern void i915_driver_irq_uninstall(drm_device_t * dev);
/* i915_mem.c */
extern int i915_mem_alloc(DRM_IOCTL_ARGS);


@ -74,7 +74,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
{
/* Maybe cut off the start of an existing block */
if (start > p->start) {
struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
struct mem_block *newblock =
drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
if (!newblock)
goto out;
newblock->start = start;
@ -90,7 +91,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
/* Maybe cut off the end of an existing block */
if (size < p->size) {
struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
struct mem_block *newblock =
drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
if (!newblock)
goto out;
newblock->start = start + size;
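The two cuts above carve an allocation out of a larger free block: the first split leaves the unused prefix in the existing node, the second splits the unused tail back off. A standalone sketch of the same arithmetic, with hypothetical types, the list bookkeeping omitted and both cuts assumed to be needed; carving [16, 32) out of a free block [0, 64) leaves [0, 16) free, [16, 32) in use and [32, 64) free.

struct blk {
        int start, size;
};

/* Cut [start, start + size) out of the free block *p: *p keeps the prefix,
 * *mid becomes the allocation and *tail the remaining free suffix. */
static void carve(struct blk *p, int start, int size,
                  struct blk *mid, struct blk *tail)
{
        mid->start = start;
        mid->size = size;
        tail->start = start + size;
        tail->size = p->start + p->size - tail->start;
        p->size = start - p->start;     /* prefix that stays free */
}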

File diff suppressed because it is too large


@ -33,7 +33,6 @@
#ifndef __MACH64_DRM_H__
#define __MACH64_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mach64_sarea.h)
*/
@ -57,7 +56,7 @@
#define MACH64_UPLOAD_TEXTURE 0x0200
#define MACH64_UPLOAD_TEX0IMAGE 0x0400
#define MACH64_UPLOAD_TEX1IMAGE 0x0800
#define MACH64_UPLOAD_CLIPRECTS 0x1000 /* handled client-side */
#define MACH64_UPLOAD_CLIPRECTS 0x1000 /* handled client-side */
#define MACH64_UPLOAD_CONTEXT 0x00ff
#define MACH64_UPLOAD_ALL 0x1fff
@ -78,7 +77,6 @@
*/
#define MACH64_NR_SAREA_CLIPRECTS 8
#define MACH64_CARD_HEAP 0
#define MACH64_AGP_HEAP 1
#define MACH64_NR_TEX_HEAPS 2
@ -90,7 +88,7 @@
#define MACH64_NR_CONTEXT_REGS 15
#define MACH64_NR_TEXTURE_REGS 4
#endif /* __MACH64_SAREA_DEFINES__ */
#endif /* __MACH64_SAREA_DEFINES__ */
typedef struct {
unsigned int dst_off_pitch;
@ -140,12 +138,12 @@ typedef struct drm_mach64_sarea {
/* Texture memory LRU.
*/
drm_tex_region_t tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS+1];
drm_tex_region_t tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
1];
unsigned int tex_age[MACH64_NR_TEX_HEAPS];
int ctx_owner;
} drm_mach64_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mach64_common.h)
*/
@ -193,11 +191,10 @@ typedef struct drm_mach64_sarea {
#define MACH64_PRIM_QUAD_STRIP 0x00000008
#define MACH64_PRIM_POLYGON 0x00000009
typedef enum _drm_mach64_dma_mode_t {
MACH64_MODE_DMA_ASYNC,
MACH64_MODE_DMA_SYNC,
MACH64_MODE_MMIO
MACH64_MODE_DMA_ASYNC,
MACH64_MODE_DMA_SYNC,
MACH64_MODE_MMIO
} drm_mach64_dma_mode_t;
typedef struct drm_mach64_init {
@ -233,9 +230,9 @@ typedef struct drm_mach64_clear {
typedef struct drm_mach64_vertex {
int prim;
void *buf; /* Address of vertex buffer */
unsigned long used; /* Number of bytes in buffer */
int discard; /* Client finished with buffer? */
void *buf; /* Address of vertex buffer */
unsigned long used; /* Number of bytes in buffer */
int discard; /* Client finished with buffer? */
} drm_mach64_vertex_t;
typedef struct drm_mach64_blit {


@ -49,45 +49,45 @@
/* FIXME: remove these when not needed */
/* Development driver options */
#define MACH64_EXTRA_CHECKING 0 /* Extra sanity checks for DMA/freelist management */
#define MACH64_VERBOSE 0 /* Verbose debugging output */
#define MACH64_EXTRA_CHECKING 0 /* Extra sanity checks for DMA/freelist management */
#define MACH64_VERBOSE 0 /* Verbose debugging output */
typedef struct drm_mach64_freelist {
struct list_head list; /* List pointers for free_list, placeholders, or pending list */
drm_buf_t *buf; /* Pointer to the buffer */
int discard; /* This flag is set when we're done (re)using a buffer */
u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */
struct list_head list; /* List pointers for free_list, placeholders, or pending list */
drm_buf_t *buf; /* Pointer to the buffer */
int discard; /* This flag is set when we're done (re)using a buffer */
u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */
} drm_mach64_freelist_t;
typedef struct drm_mach64_descriptor_ring {
dma_addr_t handle; /* handle (bus address) of ring returned by pci_alloc_consistent() */
void *start; /* write pointer (cpu address) to start of descriptor ring */
u32 start_addr; /* bus address of beginning of descriptor ring */
int size; /* size of ring in bytes */
dma_addr_t handle; /* handle (bus address) of ring returned by pci_alloc_consistent() */
void *start; /* write pointer (cpu address) to start of descriptor ring */
u32 start_addr; /* bus address of beginning of descriptor ring */
int size; /* size of ring in bytes */
u32 head_addr; /* bus address of descriptor ring head */
u32 head; /* dword offset of descriptor ring head */
u32 tail; /* dword offset of descriptor ring tail */
u32 tail_mask; /* mask used to wrap ring */
int space; /* number of free bytes in ring */
u32 head_addr; /* bus address of descriptor ring head */
u32 head; /* dword offset of descriptor ring head */
u32 tail; /* dword offset of descriptor ring tail */
u32 tail_mask; /* mask used to wrap ring */
int space; /* number of free bytes in ring */
} drm_mach64_descriptor_ring_t;
typedef struct drm_mach64_private {
drm_mach64_sarea_t *sarea_priv;
int is_pci;
drm_mach64_dma_mode_t driver_mode; /* Async DMA, sync DMA, or MMIO */
drm_mach64_dma_mode_t driver_mode; /* Async DMA, sync DMA, or MMIO */
int usec_timeout; /* Timeout for the wait functions */
int usec_timeout; /* Timeout for the wait functions */
drm_mach64_descriptor_ring_t ring; /* DMA descriptor table (ring buffer) */
int ring_running; /* Is bus mastering enabled */
drm_mach64_descriptor_ring_t ring; /* DMA descriptor table (ring buffer) */
int ring_running; /* Is bus mastering enabled */
struct list_head free_list; /* Free-list head */
struct list_head placeholders; /* Placeholder list for buffers held by clients */
struct list_head pending; /* Buffers pending completion */
struct list_head free_list; /* Free-list head */
struct list_head placeholders; /* Placeholder list for buffers held by clients */
struct list_head pending; /* Buffers pending completion */
u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES]; /* dword ring offsets of most recent frame swaps */
u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES]; /* dword ring offsets of most recent frame swaps */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
@ -104,48 +104,49 @@ typedef struct drm_mach64_private {
drm_local_map_t *fb;
drm_local_map_t *mmio;
drm_local_map_t *ring_map;
drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */
drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */
drm_local_map_t *agp_textures;
} drm_mach64_private_t;
/* mach64_dma.c */
extern int mach64_dma_init( DRM_IOCTL_ARGS );
extern int mach64_dma_idle( DRM_IOCTL_ARGS );
extern int mach64_dma_flush( DRM_IOCTL_ARGS );
extern int mach64_engine_reset( DRM_IOCTL_ARGS );
extern int mach64_dma_buffers( DRM_IOCTL_ARGS );
extern void mach64_driver_pretakedown(drm_device_t *dev);
extern int mach64_dma_init(DRM_IOCTL_ARGS);
extern int mach64_dma_idle(DRM_IOCTL_ARGS);
extern int mach64_dma_flush(DRM_IOCTL_ARGS);
extern int mach64_engine_reset(DRM_IOCTL_ARGS);
extern int mach64_dma_buffers(DRM_IOCTL_ARGS);
extern void mach64_driver_pretakedown(drm_device_t * dev);
extern int mach64_init_freelist( drm_device_t *dev );
extern void mach64_destroy_freelist( drm_device_t *dev );
extern drm_buf_t *mach64_freelist_get( drm_mach64_private_t *dev_priv );
extern int mach64_init_freelist(drm_device_t * dev);
extern void mach64_destroy_freelist(drm_device_t * dev);
extern drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv);
extern int mach64_do_wait_for_fifo( drm_mach64_private_t *dev_priv,
int entries );
extern int mach64_do_wait_for_idle( drm_mach64_private_t *dev_priv );
extern int mach64_wait_ring( drm_mach64_private_t *dev_priv, int n );
extern int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv );
extern int mach64_do_release_used_buffers( drm_mach64_private_t *dev_priv );
extern void mach64_dump_engine_info( drm_mach64_private_t *dev_priv );
extern void mach64_dump_ring_info( drm_mach64_private_t *dev_priv );
extern int mach64_do_engine_reset( drm_mach64_private_t *dev_priv );
extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv,
int entries);
extern int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv);
extern int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n);
extern int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv);
extern int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv);
extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv);
extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv);
extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv);
extern int mach64_do_dma_idle( drm_mach64_private_t *dev_priv );
extern int mach64_do_dma_flush( drm_mach64_private_t *dev_priv );
extern int mach64_do_cleanup_dma( drm_device_t *dev );
extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv);
extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv);
extern int mach64_do_cleanup_dma(drm_device_t * dev);
/* mach64_state.c */
extern int mach64_dma_clear( DRM_IOCTL_ARGS );
extern int mach64_dma_swap( DRM_IOCTL_ARGS );
extern int mach64_dma_vertex( DRM_IOCTL_ARGS );
extern int mach64_dma_blit( DRM_IOCTL_ARGS );
extern int mach64_get_param( DRM_IOCTL_ARGS );
extern int mach64_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern int mach64_dma_clear(DRM_IOCTL_ARGS);
extern int mach64_dma_swap(DRM_IOCTL_ARGS);
extern int mach64_dma_vertex(DRM_IOCTL_ARGS);
extern int mach64_dma_blit(DRM_IOCTL_ARGS);
extern int mach64_get_param(DRM_IOCTL_ARGS);
extern int mach64_driver_vblank_wait(drm_device_t * dev,
unsigned int *sequence);
extern irqreturn_t mach64_driver_irq_handler( DRM_IRQ_ARGS );
extern void mach64_driver_irq_preinstall( drm_device_t *dev );
extern void mach64_driver_irq_postinstall( drm_device_t *dev );
extern void mach64_driver_irq_uninstall( drm_device_t *dev );
extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS);
extern void mach64_driver_irq_preinstall(drm_device_t * dev);
extern void mach64_driver_irq_postinstall(drm_device_t * dev);
extern void mach64_driver_irq_uninstall(drm_device_t * dev);
/* ================================================================
* Registers
@ -155,7 +156,6 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
#define MACH64_AGP_CNTL 0x014c
#define MACH64_ALPHA_TST_CNTL 0x0550
#define MACH64_DSP_CONFIG 0x0420
#define MACH64_DSP_ON_OFF 0x0424
#define MACH64_EXT_MEM_CNTL 0x04ac
@ -165,7 +165,6 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
#define MACH64_MEM_BUF_CNTL 0x042c
#define MACH64_MEM_CNTL 0x04b0
#define MACH64_BM_ADDR 0x0648
#define MACH64_BM_COMMAND 0x0188
#define MACH64_BM_DATA 0x0648
@ -390,17 +389,17 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
# define MACH64_CRTC_VBLANK_INT (1 << 2)
# define MACH64_CRTC_VLINE_INT_EN (1 << 3)
# define MACH64_CRTC_VLINE_INT (1 << 4)
# define MACH64_CRTC_VLINE_SYNC (1 << 5) /* 0=even, 1=odd */
# define MACH64_CRTC_FRAME (1 << 6) /* 0=even, 1=odd */
# define MACH64_CRTC_VLINE_SYNC (1 << 5) /* 0=even, 1=odd */
# define MACH64_CRTC_FRAME (1 << 6) /* 0=even, 1=odd */
# define MACH64_CRTC_SNAPSHOT_INT_EN (1 << 7)
# define MACH64_CRTC_SNAPSHOT_INT (1 << 8)
# define MACH64_CRTC_I2C_INT_EN (1 << 9)
# define MACH64_CRTC_I2C_INT (1 << 10)
# define MACH64_CRTC2_VBLANK (1 << 11) /* LT Pro */
# define MACH64_CRTC2_VBLANK_INT_EN (1 << 12) /* LT Pro */
# define MACH64_CRTC2_VBLANK_INT (1 << 13) /* LT Pro */
# define MACH64_CRTC2_VLINE_INT_EN (1 << 14) /* LT Pro */
# define MACH64_CRTC2_VLINE_INT (1 << 15) /* LT Pro */
# define MACH64_CRTC2_VBLANK (1 << 11) /* LT Pro */
# define MACH64_CRTC2_VBLANK_INT_EN (1 << 12) /* LT Pro */
# define MACH64_CRTC2_VBLANK_INT (1 << 13) /* LT Pro */
# define MACH64_CRTC2_VLINE_INT_EN (1 << 14) /* LT Pro */
# define MACH64_CRTC2_VLINE_INT (1 << 15) /* LT Pro */
# define MACH64_CRTC_CAPBUF0_INT_EN (1 << 16)
# define MACH64_CRTC_CAPBUF0_INT (1 << 17)
# define MACH64_CRTC_CAPBUF1_INT_EN (1 << 18)
@ -413,9 +412,9 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
# define MACH64_CRTC_BUSMASTER_EOL_INT (1 << 25)
# define MACH64_CRTC_GP_INT_EN (1 << 26)
# define MACH64_CRTC_GP_INT (1 << 27)
# define MACH64_CRTC2_VLINE_SYNC (1 << 28) /* LT Pro */ /* 0=even, 1=odd */
# define MACH64_CRTC_SNAPSHOT2_INT_EN (1 << 29) /* LT Pro */
# define MACH64_CRTC_SNAPSHOT2_INT (1 << 30) /* LT Pro */
# define MACH64_CRTC2_VLINE_SYNC (1 << 28) /* LT Pro */ /* 0=even, 1=odd */
# define MACH64_CRTC_SNAPSHOT2_INT_EN (1 << 29) /* LT Pro */
# define MACH64_CRTC_SNAPSHOT2_INT (1 << 30) /* LT Pro */
# define MACH64_CRTC_VBLANK2_INT (1 << 31)
# define MACH64_CRTC_INT_ENS \
( \
@ -468,7 +467,6 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
#define MACH64_READ(reg) DRM_READ32(dev_priv->mmio, (reg) )
#define MACH64_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio, (reg), (val) )
#define DWMREG0 0x0400
#define DWMREG0_END 0x07ff
#define DWMREG1 0x0000
@ -495,251 +493,243 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
* The descriptor fields are loaded into the read-only
* BM_* system bus master registers during a bus-master operation
*/
#define MACH64_DMA_FRAME_BUF_OFFSET 0 /* BM_FRAME_BUF_OFFSET */
#define MACH64_DMA_SYS_MEM_ADDR 1 /* BM_SYSTEM_MEM_ADDR */
#define MACH64_DMA_COMMAND 2 /* BM_COMMAND */
#define MACH64_DMA_RESERVED 3 /* BM_STATUS */
#define MACH64_DMA_FRAME_BUF_OFFSET 0 /* BM_FRAME_BUF_OFFSET */
#define MACH64_DMA_SYS_MEM_ADDR 1 /* BM_SYSTEM_MEM_ADDR */
#define MACH64_DMA_COMMAND 2 /* BM_COMMAND */
#define MACH64_DMA_RESERVED 3 /* BM_STATUS */
/* BM_COMMAND descriptor field flags */
#define MACH64_DMA_HOLD_OFFSET (1<<30) /* Don't increment DMA_FRAME_BUF_OFFSET */
#define MACH64_DMA_EOL (1<<31) /* End of descriptor list flag */
#define MACH64_DMA_CHUNKSIZE 0x1000 /* 4kB per DMA descriptor */
#define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */
#define MACH64_DMA_HOLD_OFFSET (1<<30) /* Don't increment DMA_FRAME_BUF_OFFSET */
#define MACH64_DMA_EOL (1<<31) /* End of descriptor list flag */
#define MACH64_DMA_CHUNKSIZE 0x1000 /* 4kB per DMA descriptor */
#define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */
/* ================================================================
* Misc helper macros
*/
static __inline__ void mach64_set_dma_eol( volatile u32 * addr )
static __inline__ void mach64_set_dma_eol(volatile u32 * addr)
{
#if defined(__i386__)
int nr = 31;
/* Taken from include/asm-i386/bitops.h linux header */
__asm__ __volatile__( "lock;"
"btsl %1,%0"
:"=m" (*addr)
:"Ir" (nr));
__asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
:"Ir"(nr));
#elif defined(__powerpc__)
u32 old;
u32 mask = cpu_to_le32( MACH64_DMA_EOL );
u32 mask = cpu_to_le32(MACH64_DMA_EOL);
/* Taken from the include/asm-ppc/bitops.h linux header */
__asm__ __volatile__("\n\
1: lwarx %0,0,%3 \n\
or %0,%0,%2 \n\
stwcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (old), "=m" (*addr)
: "r" (mask), "r" (addr), "m" (*addr)
: "cc");
bne- 1b":"=&r"(old), "=m"(*addr)
:"r"(mask), "r"(addr), "m"(*addr)
:"cc");
#elif defined(__alpha__)
u32 temp;
u32 mask = MACH64_DMA_EOL;
/* Taken from the include/asm-alpha/bitops.h linux header */
__asm__ __volatile__(
"1: ldl_l %0,%3\n"
" bis %0,%2,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (*addr)
:"Ir" (mask), "m" (*addr));
__asm__ __volatile__("1: ldl_l %0,%3\n"
" bis %0,%2,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous":"=&r"(temp), "=m"(*addr)
:"Ir"(mask), "m"(*addr));
#else
u32 mask = cpu_to_le32( MACH64_DMA_EOL );
u32 mask = cpu_to_le32(MACH64_DMA_EOL);
*addr |= mask;
#endif
}
static __inline__ void mach64_clear_dma_eol( volatile u32 * addr )
static __inline__ void mach64_clear_dma_eol(volatile u32 * addr)
{
#if defined(__i386__)
int nr = 31;
/* Taken from include/asm-i386/bitops.h linux header */
__asm__ __volatile__( "lock;"
"btrl %1,%0"
:"=m" (*addr)
:"Ir" (nr));
__asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
:"Ir"(nr));
#elif defined(__powerpc__)
u32 old;
u32 mask = cpu_to_le32( MACH64_DMA_EOL );
u32 mask = cpu_to_le32(MACH64_DMA_EOL);
/* Taken from the include/asm-ppc/bitops.h linux header */
__asm__ __volatile__("\n\
1: lwarx %0,0,%3 \n\
andc %0,%0,%2 \n\
stwcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (old), "=m" (*addr)
: "r" (mask), "r" (addr), "m" (*addr)
: "cc");
bne- 1b":"=&r"(old), "=m"(*addr)
:"r"(mask), "r"(addr), "m"(*addr)
:"cc");
#elif defined(__alpha__)
u32 temp;
u32 mask = ~MACH64_DMA_EOL;
/* Taken from the include/asm-alpha/bitops.h linux header */
__asm__ __volatile__(
"1: ldl_l %0,%3\n"
" and %0,%2,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (*addr)
:"Ir" (mask), "m" (*addr));
__asm__ __volatile__("1: ldl_l %0,%3\n"
" and %0,%2,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous":"=&r"(temp), "=m"(*addr)
:"Ir"(mask), "m"(*addr));
#else
u32 mask = cpu_to_le32( ~MACH64_DMA_EOL );
u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
*addr &= mask;
#endif
}
static __inline__ void mach64_ring_start( drm_mach64_private_t *dev_priv )
static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
DRM_DEBUG( "%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
ring->head_addr, ring->head, ring->tail, ring->space );
DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
ring->head_addr, ring->head, ring->tail, ring->space);
if ( mach64_do_wait_for_idle( dev_priv ) < 0 ) {
mach64_do_engine_reset( dev_priv );
if (mach64_do_wait_for_idle(dev_priv) < 0) {
mach64_do_engine_reset(dev_priv);
}
if (dev_priv->driver_mode != MACH64_MODE_MMIO ) {
if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
/* enable bus mastering and block 1 registers */
MACH64_WRITE( MACH64_BUS_CNTL,
( MACH64_READ(MACH64_BUS_CNTL) & ~MACH64_BUS_MASTER_DIS )
| MACH64_BUS_EXT_REG_EN );
mach64_do_wait_for_idle( dev_priv );
MACH64_WRITE(MACH64_BUS_CNTL,
(MACH64_READ(MACH64_BUS_CNTL) &
~MACH64_BUS_MASTER_DIS)
| MACH64_BUS_EXT_REG_EN);
mach64_do_wait_for_idle(dev_priv);
}
/* reset descriptor table ring head */
MACH64_WRITE( MACH64_BM_GUI_TABLE_CMD,
ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB );
MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
dev_priv->ring_running = 1;
}
static __inline__ void mach64_ring_resume( drm_mach64_private_t *dev_priv,
drm_mach64_descriptor_ring_t *ring )
static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,
drm_mach64_descriptor_ring_t * ring)
{
DRM_DEBUG( "%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
ring->head_addr, ring->head, ring->tail, ring->space );
DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
ring->head_addr, ring->head, ring->tail, ring->space);
/* reset descriptor table ring head */
MACH64_WRITE( MACH64_BM_GUI_TABLE_CMD,
ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB );
MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
if ( dev_priv->driver_mode == MACH64_MODE_MMIO ) {
mach64_do_dispatch_pseudo_dma( dev_priv );
if (dev_priv->driver_mode == MACH64_MODE_MMIO) {
mach64_do_dispatch_pseudo_dma(dev_priv);
} else {
/* enable GUI bus mastering, and sync the bus master to the GUI */
MACH64_WRITE( MACH64_SRC_CNTL,
MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
MACH64_SRC_BM_OP_SYSTEM_TO_REG );
MACH64_WRITE(MACH64_SRC_CNTL,
MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
MACH64_SRC_BM_OP_SYSTEM_TO_REG);
/* kick off the transfer */
MACH64_WRITE( MACH64_DST_HEIGHT_WIDTH, 0 );
if ( dev_priv->driver_mode == MACH64_MODE_DMA_SYNC ) {
if ( (mach64_do_wait_for_idle( dev_priv )) < 0 ) {
DRM_ERROR( "%s: idle failed, resetting engine\n",
__FUNCTION__);
mach64_dump_engine_info( dev_priv );
mach64_do_engine_reset( dev_priv );
MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) {
if ((mach64_do_wait_for_idle(dev_priv)) < 0) {
DRM_ERROR("%s: idle failed, resetting engine\n",
__FUNCTION__);
mach64_dump_engine_info(dev_priv);
mach64_do_engine_reset(dev_priv);
return;
}
mach64_do_release_used_buffers( dev_priv );
mach64_do_release_used_buffers(dev_priv);
}
}
}
static __inline__ void mach64_ring_tick( drm_mach64_private_t *dev_priv,
drm_mach64_descriptor_ring_t *ring )
static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,
drm_mach64_descriptor_ring_t * ring)
{
DRM_DEBUG( "%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
ring->head_addr, ring->head, ring->tail, ring->space );
DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
ring->head_addr, ring->head, ring->tail, ring->space);
if ( !dev_priv->ring_running ) {
mach64_ring_start( dev_priv );
if (!dev_priv->ring_running) {
mach64_ring_start(dev_priv);
if ( ring->head != ring->tail ) {
mach64_ring_resume( dev_priv, ring );
if (ring->head != ring->tail) {
mach64_ring_resume(dev_priv, ring);
}
} else {
/* GUI_ACTIVE must be read before BM_GUI_TABLE to
* correctly determine the ring head
*/
int gui_active = MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE;
int gui_active =
MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE;
ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0;
if ( gui_active ) {
if (gui_active) {
/* If not idle, BM_GUI_TABLE points one descriptor
* past the current head
*/
if ( ring->head_addr == ring->start_addr ) {
if (ring->head_addr == ring->start_addr) {
ring->head_addr += ring->size;
}
ring->head_addr -= 4 * sizeof(u32);
}
if( ring->head_addr < ring->start_addr ||
ring->head_addr >= ring->start_addr + ring->size ) {
DRM_ERROR( "bad ring head address: 0x%08x\n", ring->head_addr );
mach64_dump_ring_info( dev_priv );
mach64_do_engine_reset( dev_priv );
if (ring->head_addr < ring->start_addr ||
ring->head_addr >= ring->start_addr + ring->size) {
DRM_ERROR("bad ring head address: 0x%08x\n",
ring->head_addr);
mach64_dump_ring_info(dev_priv);
mach64_do_engine_reset(dev_priv);
return;
}
ring->head = (ring->head_addr - ring->start_addr) / sizeof(u32);
if ( !gui_active && ring->head != ring->tail ) {
mach64_ring_resume( dev_priv, ring );
if (!gui_active && ring->head != ring->tail) {
mach64_ring_resume(dev_priv, ring);
}
}
}
static __inline__ void mach64_ring_stop( drm_mach64_private_t *dev_priv )
static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv)
{
DRM_DEBUG( "%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
dev_priv->ring.head_addr, dev_priv->ring.head,
dev_priv->ring.tail, dev_priv->ring.space );
DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
dev_priv->ring.head_addr, dev_priv->ring.head,
dev_priv->ring.tail, dev_priv->ring.space);
/* restore previous SRC_CNTL to disable busmastering */
mach64_do_wait_for_fifo( dev_priv, 1 );
MACH64_WRITE( MACH64_SRC_CNTL, 0 );
mach64_do_wait_for_fifo(dev_priv, 1);
MACH64_WRITE(MACH64_SRC_CNTL, 0);
/* disable busmastering but keep the block 1 registers enabled */
mach64_do_wait_for_idle( dev_priv );
MACH64_WRITE( MACH64_BUS_CNTL, MACH64_READ( MACH64_BUS_CNTL )
| MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN );
mach64_do_wait_for_idle(dev_priv);
MACH64_WRITE(MACH64_BUS_CNTL, MACH64_READ(MACH64_BUS_CNTL)
| MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN);
dev_priv->ring_running = 0;
}
static __inline__ void
mach64_update_ring_snapshot( drm_mach64_private_t *dev_priv )
mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
DRM_DEBUG( "%s\n", __FUNCTION__ );
DRM_DEBUG("%s\n", __FUNCTION__);
mach64_ring_tick( dev_priv, ring );
mach64_ring_tick(dev_priv, ring);
ring->space = (ring->head - ring->tail) * sizeof(u32);
if ( ring->space <= 0 ) {
if (ring->space <= 0) {
ring->space += ring->size;
}
}
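To make the space calculation above concrete (numbers invented for illustration): with the 16 kB descriptor ring used elsewhere in this header, a head of 100 dwords and a tail of 300 dwords give (100 - 300) * 4 = -800 bytes, the ring size of 16384 bytes is added back, and 15584 bytes are reported free. head == tail takes the same path and reports the whole ring free, which is why the test is <= 0 rather than < 0.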
@ -797,7 +787,6 @@ do { \
mach64_ring_tick( dev_priv, &(dev_priv)->ring ); \
} while (0)
/* ================================================================
* DMA macros
*/
@ -816,9 +805,10 @@ do { \
#define GETRINGOFFSET() (_entry->ring_ofs)
static __inline__ int mach64_find_pending_buf_entry ( drm_mach64_private_t *dev_priv,
drm_mach64_freelist_t **entry,
drm_buf_t *buf )
static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
dev_priv,
drm_mach64_freelist_t **
entry, drm_buf_t * buf)
{
struct list_head *ptr;
#if MACH64_EXTRA_CHECKING
@ -1050,4 +1040,4 @@ do { \
ADVANCE_RING(); \
} while(0)
#endif /* __MACH64_DRV_H__ */
#endif /* __MACH64_DRV_H__ */


@ -39,36 +39,37 @@
#include "mach64_drm.h"
#include "mach64_drv.h"
irqreturn_t mach64_driver_irq_handler( DRM_IRQ_ARGS )
irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *)dev->dev_private;
(drm_mach64_private_t *) dev->dev_private;
int status;
status = MACH64_READ( MACH64_CRTC_INT_CNTL );
status = MACH64_READ(MACH64_CRTC_INT_CNTL);
/* VBLANK interrupt */
if (status & MACH64_CRTC_VBLANK_INT) {
/* Mask off all interrupt ack bits before setting the ack bit, since
* there may be other handlers outside the DRM.
*
* NOTE: On mach64, you need to keep the enable bits set when doing
* the ack, despite what the docs say about not acking and enabling
* in a single write.
*/
MACH64_WRITE( MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_INT_ACKS)
| MACH64_CRTC_VBLANK_INT );
/* Mask off all interrupt ack bits before setting the ack bit, since
* there may be other handlers outside the DRM.
*
* NOTE: On mach64, you need to keep the enable bits set when doing
* the ack, despite what the docs say about not acking and enabling
* in a single write.
*/
MACH64_WRITE(MACH64_CRTC_INT_CNTL,
(status & ~MACH64_CRTC_INT_ACKS)
| MACH64_CRTC_VBLANK_INT);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals( dev );
return IRQ_HANDLED;
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int mach64_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
{
unsigned int cur_vblank;
int ret = 0;
@ -77,9 +78,9 @@ int mach64_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
- *sequence ) <= (1<<23) ) );
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
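A quick illustration of the wrap-around handling above (example values invented): the subtraction is done on unsigned counters, so if *sequence is 0xffffff00 and the counter has since wrapped to 0x00000010, cur_vblank - *sequence evaluates to 0x110, far below 1 << 23, and the wait completes at once; a plain cur_vblank >= *sequence test would instead stall until the counter wrapped all the way around again. 1 << 23 vblanks is on the order of a day at typical refresh rates, which is where the cut-off described in the comment above comes from.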
@ -88,42 +89,47 @@ int mach64_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
/* drm_dma.h hooks
*/
void mach64_driver_irq_preinstall( drm_device_t *dev ) {
void mach64_driver_irq_preinstall(drm_device_t * dev)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *)dev->dev_private;
(drm_mach64_private_t *) dev->dev_private;
u32 status = MACH64_READ( MACH64_CRTC_INT_CNTL );
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status );
DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status);
/* Disable and clear VBLANK interrupt */
MACH64_WRITE( MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)
| MACH64_CRTC_VBLANK_INT );
/* Disable and clear VBLANK interrupt */
MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)
| MACH64_CRTC_VBLANK_INT);
}
void mach64_driver_irq_postinstall( drm_device_t *dev ) {
void mach64_driver_irq_postinstall(drm_device_t * dev)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *)dev->dev_private;
(drm_mach64_private_t *) dev->dev_private;
/* Turn on VBLANK interrupt */
MACH64_WRITE( MACH64_CRTC_INT_CNTL, MACH64_READ( MACH64_CRTC_INT_CNTL )
| MACH64_CRTC_VBLANK_INT_EN );
MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
| MACH64_CRTC_VBLANK_INT_EN);
DRM_DEBUG("after install CRTC_INT_CTNL: 0x%08x\n", MACH64_READ( MACH64_CRTC_INT_CNTL ));
DRM_DEBUG("after install CRTC_INT_CTNL: 0x%08x\n",
MACH64_READ(MACH64_CRTC_INT_CNTL));
}
void mach64_driver_irq_uninstall( drm_device_t *dev ) {
void mach64_driver_irq_uninstall(drm_device_t * dev)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *)dev->dev_private;
if ( !dev_priv )
(drm_mach64_private_t *) dev->dev_private;
if (!dev_priv)
return;
/* Disable and clear VBLANK interrupt */
MACH64_WRITE( MACH64_CRTC_INT_CNTL,
(MACH64_READ( MACH64_CRTC_INT_CNTL ) & ~MACH64_CRTC_VBLANK_INT_EN)
| MACH64_CRTC_VBLANK_INT );
MACH64_WRITE(MACH64_CRTC_INT_CNTL,
(MACH64_READ(MACH64_CRTC_INT_CNTL) &
~MACH64_CRTC_VBLANK_INT_EN)
| MACH64_CRTC_VBLANK_INT);
DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n",
MACH64_READ( MACH64_CRTC_INT_CNTL ));
MACH64_READ(MACH64_CRTC_INT_CNTL));
}

File diff suppressed because it is too large


@ -41,57 +41,57 @@
#define MGA_DEFAULT_USEC_TIMEOUT 10000
#define MGA_FREELIST_DEBUG 0
/* ================================================================
* Engine control
*/
int mga_do_wait_for_idle( drm_mga_private_t *dev_priv )
int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
{
u32 status = 0;
int i;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK;
if ( status == MGA_ENDPRDMASTS ) {
MGA_WRITE8( MGA_CRTC_INDEX, 0 );
for (i = 0; i < dev_priv->usec_timeout; i++) {
status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
if (status == MGA_ENDPRDMASTS) {
MGA_WRITE8(MGA_CRTC_INDEX, 0);
return 0;
}
DRM_UDELAY( 1 );
DRM_UDELAY(1);
}
#if MGA_DMA_DEBUG
DRM_ERROR( "failed!\n" );
DRM_INFO( " status=0x%08x\n", status );
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
return DRM_ERR(EBUSY);
}
int mga_do_dma_idle( drm_mga_private_t *dev_priv )
int mga_do_dma_idle(drm_mga_private_t * dev_priv)
{
u32 status = 0;
int i;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
status = MGA_READ( MGA_STATUS ) & MGA_DMA_IDLE_MASK;
if ( status == MGA_ENDPRDMASTS ) return 0;
DRM_UDELAY( 1 );
for (i = 0; i < dev_priv->usec_timeout; i++) {
status = MGA_READ(MGA_STATUS) & MGA_DMA_IDLE_MASK;
if (status == MGA_ENDPRDMASTS)
return 0;
DRM_UDELAY(1);
}
#if MGA_DMA_DEBUG
DRM_ERROR( "failed! status=0x%08x\n", status );
DRM_ERROR("failed! status=0x%08x\n", status);
#endif
return DRM_ERR(EBUSY);
}
int mga_do_dma_reset( drm_mga_private_t *dev_priv )
int mga_do_dma_reset(drm_mga_private_t * dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
/* The primary DMA stream should look like new right about now.
*/
@ -110,16 +110,16 @@ int mga_do_dma_reset( drm_mga_private_t *dev_priv )
return 0;
}
int mga_do_engine_reset( drm_mga_private_t *dev_priv )
int mga_do_engine_reset(drm_mga_private_t * dev_priv)
{
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
/* Okay, so we've completely screwed up and locked the engine.
* How about we clean up after ourselves?
*/
MGA_WRITE( MGA_RST, MGA_SOFTRESET );
DRM_UDELAY( 15 ); /* Wait at least 10 usecs */
MGA_WRITE( MGA_RST, 0 );
MGA_WRITE(MGA_RST, MGA_SOFTRESET);
DRM_UDELAY(15); /* Wait at least 10 usecs */
MGA_WRITE(MGA_RST, 0);
/* Initialize the registers that get clobbered by the soft
* reset. Many of the core register values survive a reset,
@ -129,47 +129,46 @@ int mga_do_engine_reset( drm_mga_private_t *dev_priv )
* server should reset the engine state to known values.
*/
#if 0
MGA_WRITE( MGA_PRIMPTR,
virt_to_bus((void *)dev_priv->prim.status_page) |
MGA_PRIMPTREN0 |
MGA_PRIMPTREN1 );
MGA_WRITE(MGA_PRIMPTR,
virt_to_bus((void *)dev_priv->prim.status_page) |
MGA_PRIMPTREN0 | MGA_PRIMPTREN1);
#endif
MGA_WRITE( MGA_ICLEAR, MGA_SOFTRAPICLR );
MGA_WRITE( MGA_IEN, MGA_SOFTRAPIEN );
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
MGA_WRITE(MGA_IEN, MGA_SOFTRAPIEN);
/* The primary DMA stream should look like new right about now.
*/
mga_do_dma_reset( dev_priv );
mga_do_dma_reset(dev_priv);
/* This bad boy will never fail.
*/
return 0;
}
/* ================================================================
* Primary DMA stream
*/
void mga_do_dma_flush( drm_mga_private_t *dev_priv )
void mga_do_dma_flush(drm_mga_private_t * dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
u32 status = 0;
int i;
DMA_LOCALS;
DRM_DEBUG( "\n" );
DMA_LOCALS;
DRM_DEBUG("\n");
/* We need to wait so that we can do a safe flush */
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK;
if ( status == MGA_ENDPRDMASTS ) break;
DRM_UDELAY( 1 );
/* We need to wait so that we can do a safe flush */
for (i = 0; i < dev_priv->usec_timeout; i++) {
status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
if (status == MGA_ENDPRDMASTS)
break;
DRM_UDELAY(1);
}
if ( primary->tail == primary->last_flush ) {
DRM_DEBUG( " bailing out...\n" );
if (primary->tail == primary->last_flush) {
DRM_DEBUG(" bailing out...\n");
return;
}
@ -179,48 +178,46 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv )
* actually (partially?) reads the first of these commands.
* See page 4-16 in the G400 manual, middle of the page or so.
*/
BEGIN_DMA( 1 );
BEGIN_DMA(1);
DMA_BLOCK( MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000 );
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
ADVANCE_DMA();
primary->last_flush = primary->tail;
head = MGA_READ( MGA_PRIMADDRESS );
head = MGA_READ(MGA_PRIMADDRESS);
if ( head <= tail ) {
if (head <= tail) {
primary->space = primary->size - primary->tail;
} else {
primary->space = head - tail;
}
DRM_DEBUG( " head = 0x%06lx\n", head - dev_priv->primary->offset );
DRM_DEBUG( " tail = 0x%06lx\n", tail - dev_priv->primary->offset );
DRM_DEBUG( " space = 0x%06x\n", primary->space );
DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset);
DRM_DEBUG(" tail = 0x%06lx\n", tail - dev_priv->primary->offset);
DRM_DEBUG(" space = 0x%06x\n", primary->space);
mga_flush_write_combine();
MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );
MGA_WRITE(MGA_PRIMEND, tail | MGA_PAGPXFER);
DRM_DEBUG( "done.\n" );
DRM_DEBUG("done.\n");
}
void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
DMA_LOCALS;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
BEGIN_DMA_WRAP();
DMA_BLOCK( MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000 );
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
ADVANCE_DMA();
@ -230,45 +227,43 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
primary->last_flush = 0;
primary->last_wrap++;
head = MGA_READ( MGA_PRIMADDRESS );
head = MGA_READ(MGA_PRIMADDRESS);
if ( head == dev_priv->primary->offset ) {
if (head == dev_priv->primary->offset) {
primary->space = primary->size;
} else {
primary->space = head - dev_priv->primary->offset;
}
DRM_DEBUG( " head = 0x%06lx\n",
head - dev_priv->primary->offset );
DRM_DEBUG( " tail = 0x%06x\n", primary->tail );
DRM_DEBUG( " wrap = %d\n", primary->last_wrap );
DRM_DEBUG( " space = 0x%06x\n", primary->space );
DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset);
DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
DRM_DEBUG(" wrap = %d\n", primary->last_wrap);
DRM_DEBUG(" space = 0x%06x\n", primary->space);
mga_flush_write_combine();
MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );
MGA_WRITE(MGA_PRIMEND, tail | MGA_PAGPXFER);
set_bit( 0, &primary->wrapped );
DRM_DEBUG( "done.\n" );
set_bit(0, &primary->wrapped);
DRM_DEBUG("done.\n");
}
void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv )
void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
u32 head = dev_priv->primary->offset;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
sarea_priv->last_wrap++;
DRM_DEBUG( " wrap = %d\n", sarea_priv->last_wrap );
DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap);
mga_flush_write_combine();
MGA_WRITE( MGA_PRIMADDRESS, head | MGA_DMA_GENERAL );
MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
clear_bit( 0, &primary->wrapped );
DRM_DEBUG( "done.\n" );
clear_bit(0, &primary->wrapped);
DRM_DEBUG("done.\n");
}
/* ================================================================
* Freelist management
*/
@ -277,63 +272,61 @@ void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv )
#define MGA_BUFFER_FREE 0
#if MGA_FREELIST_DEBUG
static void mga_freelist_print( drm_device_t *dev )
static void mga_freelist_print(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
DRM_INFO( "\n" );
DRM_INFO( "current dispatch: last=0x%x done=0x%x\n",
dev_priv->sarea_priv->last_dispatch,
(unsigned int)(MGA_READ( MGA_PRIMADDRESS ) -
dev_priv->primary->offset) );
DRM_INFO( "current freelist:\n" );
DRM_INFO("\n");
DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
dev_priv->sarea_priv->last_dispatch,
(unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
dev_priv->primary->offset));
DRM_INFO("current freelist:\n");
for ( entry = dev_priv->head->next ; entry ; entry = entry->next ) {
DRM_INFO( " %p idx=%2d age=0x%x 0x%06lx\n",
entry, entry->buf->idx, entry->age.head,
entry->age.head - dev_priv->primary->offset );
for (entry = dev_priv->head->next; entry; entry = entry->next) {
DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n",
entry, entry->buf->idx, entry->age.head,
entry->age.head - dev_priv->primary->offset);
}
DRM_INFO( "\n" );
DRM_INFO("\n");
}
#endif
static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_freelist_t *entry;
int i;
DRM_DEBUG( "count=%d\n", dma->buf_count );
DRM_DEBUG("count=%d\n", dma->buf_count);
dev_priv->head = drm_alloc( sizeof(drm_mga_freelist_t),
DRM_MEM_DRIVER );
if ( dev_priv->head == NULL )
dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
if (dev_priv->head == NULL)
return DRM_ERR(ENOMEM);
memset( dev_priv->head, 0, sizeof(drm_mga_freelist_t) );
SET_AGE( &dev_priv->head->age, MGA_BUFFER_USED, 0 );
memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
for ( i = 0 ; i < dma->buf_count ; i++ ) {
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
buf_priv = buf->dev_private;
entry = drm_alloc( sizeof(drm_mga_freelist_t),
DRM_MEM_DRIVER );
if ( entry == NULL )
entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
if (entry == NULL)
return DRM_ERR(ENOMEM);
memset( entry, 0, sizeof(drm_mga_freelist_t) );
memset(entry, 0, sizeof(drm_mga_freelist_t));
entry->next = dev_priv->head->next;
entry->prev = dev_priv->head;
SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
entry->buf = buf;
if ( dev_priv->head->next != NULL )
if (dev_priv->head->next != NULL)
dev_priv->head->next->prev = entry;
if ( entry->next == NULL )
if (entry->next == NULL)
dev_priv->tail = entry;
buf_priv->list_entry = entry;
@ -346,17 +339,17 @@ static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
return 0;
}
static void mga_freelist_cleanup( drm_device_t *dev )
static void mga_freelist_cleanup(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
drm_mga_freelist_t *next;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
entry = dev_priv->head;
while ( entry ) {
while (entry) {
next = entry->next;
drm_free( entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER );
drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
entry = next;
}
@ -366,71 +359,69 @@ static void mga_freelist_cleanup( drm_device_t *dev )
#if 0
/* FIXME: Still needed?
*/
static void mga_freelist_reset( drm_device_t *dev )
static void mga_freelist_reset(drm_device_t * dev)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_mga_buf_priv_t *buf_priv;
int i;
for ( i = 0 ; i < dma->buf_count ; i++ ) {
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
SET_AGE( &buf_priv->list_entry->age,
MGA_BUFFER_FREE, 0 );
buf_priv = buf->dev_private;
SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
}
}
#endif
static drm_buf_t *mga_freelist_get( drm_device_t *dev )
static drm_buf_t *mga_freelist_get(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *next;
drm_mga_freelist_t *prev;
drm_mga_freelist_t *tail = dev_priv->tail;
u32 head, wrap;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
head = MGA_READ( MGA_PRIMADDRESS );
head = MGA_READ(MGA_PRIMADDRESS);
wrap = dev_priv->sarea_priv->last_wrap;
DRM_DEBUG( " tail=0x%06lx %d\n",
tail->age.head ?
tail->age.head - dev_priv->primary->offset : 0,
tail->age.wrap );
DRM_DEBUG( " head=0x%06lx %d\n",
head - dev_priv->primary->offset, wrap );
DRM_DEBUG(" tail=0x%06lx %d\n",
tail->age.head ?
tail->age.head - dev_priv->primary->offset : 0,
tail->age.wrap);
DRM_DEBUG(" head=0x%06lx %d\n",
head - dev_priv->primary->offset, wrap);
if ( TEST_AGE( &tail->age, head, wrap ) ) {
if (TEST_AGE(&tail->age, head, wrap)) {
prev = dev_priv->tail->prev;
next = dev_priv->tail;
prev->next = NULL;
next->prev = next->next = NULL;
dev_priv->tail = prev;
SET_AGE( &next->age, MGA_BUFFER_USED, 0 );
SET_AGE(&next->age, MGA_BUFFER_USED, 0);
return next->buf;
}
DRM_DEBUG( "returning NULL!\n" );
DRM_DEBUG("returning NULL!\n");
return NULL;
}
int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
drm_mga_freelist_t *head, *entry, *prev;
DRM_DEBUG( "age=0x%06lx wrap=%d\n",
buf_priv->list_entry->age.head -
dev_priv->primary->offset,
buf_priv->list_entry->age.wrap );
DRM_DEBUG("age=0x%06lx wrap=%d\n",
buf_priv->list_entry->age.head -
dev_priv->primary->offset, buf_priv->list_entry->age.wrap);
entry = buf_priv->list_entry;
head = dev_priv->head;
if ( buf_priv->list_entry->age.head == MGA_BUFFER_USED ) {
SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
prev = dev_priv->tail;
prev->next = entry;
entry->prev = prev;
@ -446,43 +437,42 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
return 0;
}
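The freelist above stamps every buffer with a drm_mga_age_t, a (head, wrap) pair recording where the primary DMA stream stood when the buffer was queued, and mga_freelist_get() only hands a buffer back out once TEST_AGE() says the stream has moved past that point. The SET_AGE()/TEST_AGE() bodies live in a part of mga_drv.h that the hunks in this commit do not show in full, so the following is only a hedged restatement of the comparison they appear to implement; buf_age, stamp_age() and age_passed() are hypothetical names:

/* Hypothetical restatement of the buffer-aging test: a buffer stamped
 * at (head, wrap) becomes free again once the primary DMA stream's
 * current position has passed it, i.e. a lexicographic compare on
 * (wrap, head).
 */
struct buf_age {
	unsigned int head;	/* stream offset when the buffer was queued */
	unsigned int wrap;	/* stream wrap count at that time */
};

void stamp_age(struct buf_age *age, unsigned int head, unsigned int wrap)
{
	age->head = head;
	age->wrap = wrap;
}

int age_passed(const struct buf_age *age, unsigned int head,
	       unsigned int wrap)
{
	return age->wrap < wrap ||
	       (age->wrap == wrap && age->head < head);
}

Under this reading, SET_AGE(&entry->age, MGA_BUFFER_FREE, 0) in mga_freelist_init() (MGA_BUFFER_FREE is 0) simply marks a freshly created entry as immediately reclaimable.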
/* ================================================================
* DMA initialization, cleanup
*/
static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
{
drm_mga_private_t *dev_priv;
int ret;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
if ( !dev_priv )
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
if (!dev_priv)
return DRM_ERR(ENOMEM);
memset( dev_priv, 0, sizeof(drm_mga_private_t) );
memset(dev_priv, 0, sizeof(drm_mga_private_t));
dev_priv->chipset = init->chipset;
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
if ( init->sgram ) {
if (init->sgram) {
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
} else {
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
}
dev_priv->maccess = init->maccess;
dev_priv->maccess = init->maccess;
dev_priv->fb_cpp = init->fb_cpp;
dev_priv->front_offset = init->front_offset;
dev_priv->front_pitch = init->front_pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->back_pitch = init->back_pitch;
dev_priv->fb_cpp = init->fb_cpp;
dev_priv->front_offset = init->front_offset;
dev_priv->front_pitch = init->front_pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->back_pitch = init->back_pitch;
dev_priv->depth_cpp = init->depth_cpp;
dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch;
dev_priv->depth_cpp = init->depth_cpp;
dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch;
/* FIXME: Need to support AGP textures...
*/
@ -491,108 +481,104 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
DRM_GETSAREA();
if(!dev_priv->sarea) {
DRM_ERROR( "failed to find sarea!\n" );
if (!dev_priv->sarea) {
DRM_ERROR("failed to find sarea!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if(!dev_priv->mmio) {
DRM_ERROR( "failed to find mmio region!\n" );
if (!dev_priv->mmio) {
DRM_ERROR("failed to find mmio region!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
dev_priv->status = drm_core_findmap(dev, init->status_offset);
if(!dev_priv->status) {
DRM_ERROR( "failed to find status page!\n" );
if (!dev_priv->status) {
DRM_ERROR("failed to find status page!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
if(!dev_priv->warp) {
DRM_ERROR( "failed to find warp microcode region!\n" );
if (!dev_priv->warp) {
DRM_ERROR("failed to find warp microcode region!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
if(!dev_priv->primary) {
DRM_ERROR( "failed to find primary dma region!\n" );
if (!dev_priv->primary) {
DRM_ERROR("failed to find primary dma region!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if(!dev->agp_buffer_map) {
DRM_ERROR( "failed to find dma buffer region!\n" );
if (!dev->agp_buffer_map) {
DRM_ERROR("failed to find dma buffer region!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return DRM_ERR(EINVAL);
}
dev_priv->sarea_priv =
(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
(drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
init->sarea_priv_offset);
drm_core_ioremap( dev_priv->warp, dev );
drm_core_ioremap( dev_priv->primary, dev );
drm_core_ioremap( dev->agp_buffer_map, dev );
drm_core_ioremap(dev_priv->warp, dev);
drm_core_ioremap(dev_priv->primary, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
if(!dev_priv->warp->handle ||
!dev_priv->primary->handle ||
!dev->agp_buffer_map->handle ) {
DRM_ERROR( "failed to ioremap agp regions!\n" );
if (!dev_priv->warp->handle ||
!dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap agp regions!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return DRM_ERR(ENOMEM);
}
ret = mga_warp_install_microcode( dev_priv );
if ( ret < 0 ) {
DRM_ERROR( "failed to install WARP ucode!\n" );
ret = mga_warp_install_microcode(dev_priv);
if (ret < 0) {
DRM_ERROR("failed to install WARP ucode!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return ret;
}
ret = mga_warp_init( dev_priv );
if ( ret < 0 ) {
DRM_ERROR( "failed to init WARP engine!\n" );
ret = mga_warp_init(dev_priv);
if (ret < 0) {
DRM_ERROR("failed to init WARP engine!\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return ret;
}
dev_priv->prim.status = (u32 *)dev_priv->status->handle;
dev_priv->prim.status = (u32 *) dev_priv->status->handle;
mga_do_wait_for_idle( dev_priv );
mga_do_wait_for_idle(dev_priv);
/* Init the primary DMA registers.
*/
MGA_WRITE( MGA_PRIMADDRESS,
dev_priv->primary->offset | MGA_DMA_GENERAL );
MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
#if 0
MGA_WRITE( MGA_PRIMPTR,
virt_to_bus((void *)dev_priv->prim.status) |
MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
MGA_PRIMPTREN1 ); /* DWGSYNC */
MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
MGA_PRIMPTREN1); /* DWGSYNC */
#endif
dev_priv->prim.start = (u8 *)dev_priv->primary->handle;
dev_priv->prim.end = ((u8 *)dev_priv->primary->handle
dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
+ dev_priv->primary->size);
dev_priv->prim.size = dev_priv->primary->size;
@ -612,11 +598,11 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
dev_priv->sarea_priv->last_frame.head = 0;
dev_priv->sarea_priv->last_frame.wrap = 0;
if ( mga_freelist_init( dev, dev_priv ) < 0 ) {
DRM_ERROR( "could not initialize freelist\n" );
if (mga_freelist_init(dev, dev_priv) < 0) {
DRM_ERROR("could not initialize freelist\n");
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
return DRM_ERR(ENOMEM);
}
@ -625,131 +611,132 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
return 0;
}
int mga_do_cleanup_dma( drm_device_t *dev )
int mga_do_cleanup_dma(drm_device_t * dev)
{
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if ( dev->irq_enabled ) drm_irq_uninstall(dev);
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if ( dev->dev_private ) {
if (dev->dev_private) {
drm_mga_private_t *dev_priv = dev->dev_private;
if ( dev_priv->warp != NULL )
drm_core_ioremapfree( dev_priv->warp, dev );
if ( dev_priv->primary != NULL )
drm_core_ioremapfree( dev_priv->primary, dev );
if ( dev->agp_buffer_map != NULL ) {
drm_core_ioremapfree( dev->agp_buffer_map, dev );
if (dev_priv->warp != NULL)
drm_core_ioremapfree(dev_priv->warp, dev);
if (dev_priv->primary != NULL)
drm_core_ioremapfree(dev_priv->primary, dev);
if (dev->agp_buffer_map != NULL) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
}
if ( dev_priv->head != NULL ) {
mga_freelist_cleanup( dev );
if (dev_priv->head != NULL) {
mga_freelist_cleanup(dev);
}
drm_free( dev->dev_private, sizeof(drm_mga_private_t),
DRM_MEM_DRIVER );
drm_free(dev->dev_private, sizeof(drm_mga_private_t),
DRM_MEM_DRIVER);
dev->dev_private = NULL;
}
return 0;
}
int mga_dma_init( DRM_IOCTL_ARGS )
int mga_dma_init(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_init_t init;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) );
DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
sizeof(init));
switch ( init.func ) {
switch (init.func) {
case MGA_INIT_DMA:
return mga_do_init_dma( dev, &init );
return mga_do_init_dma(dev, &init);
case MGA_CLEANUP_DMA:
return mga_do_cleanup_dma( dev );
return mga_do_cleanup_dma(dev);
}
return DRM_ERR(EINVAL);
}
/* ================================================================
* Primary DMA stream management
*/
int mga_dma_flush( DRM_IOCTL_ARGS )
int mga_dma_flush(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
drm_lock_t lock;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t __user *)data, sizeof(lock) );
DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t __user *) data,
sizeof(lock));
DRM_DEBUG( "%s%s%s\n",
(lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
(lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
(lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "" );
DRM_DEBUG("%s%s%s\n",
(lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
(lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
(lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
WRAP_WAIT_WITH_RETURN( dev_priv );
WRAP_WAIT_WITH_RETURN(dev_priv);
if ( lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL) ) {
mga_do_dma_flush( dev_priv );
if (lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
mga_do_dma_flush(dev_priv);
}
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
if (lock.flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
int ret = mga_do_wait_for_idle( dev_priv );
if ( ret < 0 )
DRM_INFO( "%s: -EBUSY\n", __FUNCTION__ );
int ret = mga_do_wait_for_idle(dev_priv);
if (ret < 0)
DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
return ret;
#else
return mga_do_wait_for_idle( dev_priv );
return mga_do_wait_for_idle(dev_priv);
#endif
} else {
return 0;
}
}
int mga_dma_reset( DRM_IOCTL_ARGS )
int mga_dma_reset(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
return mga_do_dma_reset( dev_priv );
return mga_do_dma_reset(dev_priv);
}
/* ================================================================
* DMA buffer management
*/
static int mga_dma_get_buffers( DRMFILE filp,
drm_device_t *dev, drm_dma_t *d )
static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
{
drm_buf_t *buf;
int i;
for ( i = d->granted_count ; i < d->request_count ; i++ ) {
buf = mga_freelist_get( dev );
if ( !buf ) return DRM_ERR(EAGAIN);
for (i = d->granted_count; i < d->request_count; i++) {
buf = mga_freelist_get(dev);
if (!buf)
return DRM_ERR(EAGAIN);
buf->filp = filp;
if ( DRM_COPY_TO_USER( &d->request_indices[i],
&buf->idx, sizeof(buf->idx) ) )
if (DRM_COPY_TO_USER(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return DRM_ERR(EFAULT);
if ( DRM_COPY_TO_USER( &d->request_sizes[i],
&buf->total, sizeof(buf->total) ) )
if (DRM_COPY_TO_USER(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return DRM_ERR(EFAULT);
d->granted_count++;
@ -757,55 +744,55 @@ static int mga_dma_get_buffers( DRMFILE filp,
return 0;
}
int mga_dma_buffers( DRM_IOCTL_ARGS )
int mga_dma_buffers(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
int ret = 0;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
/* Please don't send us buffers.
*/
if ( d.send_count != 0 ) {
DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d.send_count );
if (d.send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d.send_count);
return DRM_ERR(EINVAL);
}
/* We'll send you buffers.
*/
if ( d.request_count < 0 || d.request_count > dma->buf_count ) {
DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d.request_count, dma->buf_count );
if (d.request_count < 0 || d.request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d.request_count, dma->buf_count);
return DRM_ERR(EINVAL);
}
WRAP_TEST_WITH_RETURN( dev_priv );
WRAP_TEST_WITH_RETURN(dev_priv);
d.granted_count = 0;
if ( d.request_count ) {
ret = mga_dma_get_buffers( filp, dev, &d );
if (d.request_count) {
ret = mga_dma_get_buffers(filp, dev, &d);
}
DRM_COPY_TO_USER_IOCTL( argp, d, sizeof(d) );
DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
return ret;
}
void mga_driver_pretakedown(drm_device_t *dev)
void mga_driver_pretakedown(drm_device_t * dev)
{
mga_do_cleanup_dma( dev );
mga_do_cleanup_dma(dev);
}
int mga_driver_dma_quiescent(drm_device_t *dev)
int mga_driver_dma_quiescent(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
return mga_do_wait_for_idle( dev_priv );
return mga_do_wait_for_idle(dev_priv);
}
@ -44,10 +44,10 @@
/* WARP pipe flags
*/
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
#define MGA_T2 0x8 /* multitexture */
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
#define MGA_T2 0x8 /* multitexture */
#define MGA_WARP_TGZ 0
#define MGA_WARP_TGZF (MGA_F)
@ -66,15 +66,14 @@
#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
#define MGA_MAX_G200_PIPES 8 /* no multitex */
#define MGA_MAX_G200_PIPES 8 /* no multitex */
#define MGA_MAX_G400_PIPES 16
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
#define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2
#define MGA_FRONT 0x1
#define MGA_BACK 0x2
#define MGA_DEPTH 0x4
@ -85,14 +84,14 @@
#define MGA_UPLOAD_TEX0 0x2
#define MGA_UPLOAD_TEX1 0x4
#define MGA_UPLOAD_PIPE 0x8
#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define MGA_UPLOAD_2D 0x40
#define MGA_WAIT_AGE 0x80 /* handled client-side */
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#define MGA_WAIT_AGE 0x80 /* handled client-side */
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#if 0
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
quiescent */
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
quiescent */
#endif
/* 32 buffers of 64k each, total 2 meg.
@ -119,8 +118,7 @@
#define DRM_MGA_IDLE_RETRY 2048
#endif /* __MGA_SAREA_DEFINES__ */
#endif /* __MGA_SAREA_DEFINES__ */
/* Setup registers for 3D context
*/
@ -164,25 +162,25 @@ typedef struct {
/* General aging mechanism
*/
typedef struct {
unsigned int head; /* Position of head pointer */
unsigned int wrap; /* Primary DMA wrap count */
unsigned int head; /* Position of head pointer */
unsigned int wrap; /* Primary DMA wrap count */
} drm_mga_age_t;
typedef struct _drm_mga_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
drm_mga_context_regs_t context_state;
drm_mga_server_regs_t server_state;
drm_mga_texture_regs_t tex_state[2];
unsigned int warp_pipe;
unsigned int dirty;
unsigned int vertsize;
drm_mga_context_regs_t context_state;
drm_mga_server_regs_t server_state;
drm_mga_texture_regs_t tex_state[2];
unsigned int warp_pipe;
unsigned int dirty;
unsigned int vertsize;
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
unsigned int nbox;
drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Information about the most recently used 3d drawable. The
* client fills in the req_* fields, the server fills in the
@ -191,18 +189,18 @@ typedef struct _drm_mga_sarea {
* The client clears the exported_drawable field before
* clobbering the boxes data.
*/
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
unsigned int exported_drawable;
unsigned int exported_drawable;
unsigned int exported_index;
unsigned int exported_stamp;
unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
unsigned int exported_stamp;
unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
@ -210,21 +208,20 @@ typedef struct _drm_mga_sarea {
unsigned int last_wrap;
drm_mga_age_t last_frame;
unsigned int last_enqueue; /* last time a buffer was enqueued */
unsigned int last_enqueue; /* last time a buffer was enqueued */
unsigned int last_dispatch; /* age of the most recently dispatched buffer */
unsigned int last_quiescent; /* */
unsigned int last_quiescent; /* */
/* LRU lists for texture memory in agp space and on the card.
*/
drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1];
drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
int ctxOwner;
} drm_mga_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmMga.h)
*/
@ -255,33 +252,33 @@ typedef struct _drm_mga_sarea {
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
typedef struct _drm_mga_warp_index {
int installed;
unsigned long phys_addr;
int size;
int installed;
unsigned long phys_addr;
int size;
} drm_mga_warp_index_t;
typedef struct drm_mga_init {
enum {
MGA_INIT_DMA = 0x01,
MGA_CLEANUP_DMA = 0x02
enum {
MGA_INIT_DMA = 0x01,
MGA_CLEANUP_DMA = 0x02
} func;
unsigned long sarea_priv_offset;
unsigned long sarea_priv_offset;
int chipset;
int sgram;
int sgram;
unsigned int maccess;
unsigned int fb_cpp;
unsigned int fb_cpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_cpp;
unsigned int depth_offset, depth_pitch;
unsigned int depth_cpp;
unsigned int depth_offset, depth_pitch;
unsigned int texture_offset[MGA_NR_TEX_HEAPS];
unsigned int texture_size[MGA_NR_TEX_HEAPS];
unsigned int texture_offset[MGA_NR_TEX_HEAPS];
unsigned int texture_size[MGA_NR_TEX_HEAPS];
unsigned long fb_offset;
unsigned long mmio_offset;
@ -293,7 +290,7 @@ typedef struct drm_mga_init {
typedef struct drm_mga_fullscreen {
enum {
MGA_INIT_FULLSCREEN = 0x01,
MGA_INIT_FULLSCREEN = 0x01,
MGA_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_mga_fullscreen_t;
@ -307,16 +304,16 @@ typedef struct drm_mga_clear {
} drm_mga_clear_t;
typedef struct drm_mga_vertex {
int idx; /* buffer to queue */
int used; /* bytes in use */
int discard; /* client finished with buffer? */
int idx; /* buffer to queue */
int used; /* bytes in use */
int discard; /* client finished with buffer? */
} drm_mga_vertex_t;
typedef struct drm_mga_indices {
int idx; /* buffer to queue */
int idx; /* buffer to queue */
unsigned int start;
unsigned int end;
int discard; /* client finished with buffer? */
int discard; /* client finished with buffer? */
} drm_mga_indices_t;
typedef struct drm_mga_iload {
@ -332,7 +329,7 @@ typedef struct _drm_mga_blit {
int src_pitch, dst_pitch;
int delta_sx, delta_sy;
int delta_dx, delta_dy;
int height, ydir; /* flip image vertically */
int height, ydir; /* flip image vertically */
int source_pitch, dest_pitch;
} drm_mga_blit_t;
@ -62,14 +62,14 @@ typedef struct drm_mga_primary_buffer {
} drm_mga_primary_buffer_t;
typedef struct drm_mga_freelist {
struct drm_mga_freelist *next;
struct drm_mga_freelist *prev;
struct drm_mga_freelist *next;
struct drm_mga_freelist *prev;
drm_mga_age_t age;
drm_buf_t *buf;
drm_buf_t *buf;
} drm_mga_freelist_t;
typedef struct {
drm_mga_freelist_t *list_entry;
drm_mga_freelist_t *list_entry;
int discard;
int dispatched;
} drm_mga_buf_priv_t;
@ -78,8 +78,8 @@ typedef struct drm_mga_private {
drm_mga_primary_buffer_t prim;
drm_mga_sarea_t *sarea_priv;
drm_mga_freelist_t *head;
drm_mga_freelist_t *tail;
drm_mga_freelist_t *head;
drm_mga_freelist_t *tail;
unsigned int warp_pipe;
unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
@ -113,43 +113,43 @@ typedef struct drm_mga_private {
} drm_mga_private_t;
/* mga_dma.c */
extern int mga_dma_init( DRM_IOCTL_ARGS );
extern int mga_dma_flush( DRM_IOCTL_ARGS );
extern int mga_dma_reset( DRM_IOCTL_ARGS );
extern int mga_dma_buffers( DRM_IOCTL_ARGS );
extern void mga_driver_pretakedown(drm_device_t *dev);
extern int mga_driver_dma_quiescent(drm_device_t *dev);
extern int mga_dma_init(DRM_IOCTL_ARGS);
extern int mga_dma_flush(DRM_IOCTL_ARGS);
extern int mga_dma_reset(DRM_IOCTL_ARGS);
extern int mga_dma_buffers(DRM_IOCTL_ARGS);
extern void mga_driver_pretakedown(drm_device_t * dev);
extern int mga_driver_dma_quiescent(drm_device_t * dev);
extern int mga_do_wait_for_idle( drm_mga_private_t *dev_priv );
extern int mga_do_dma_idle( drm_mga_private_t *dev_priv );
extern int mga_do_dma_reset( drm_mga_private_t *dev_priv );
extern int mga_do_engine_reset( drm_mga_private_t *dev_priv );
extern int mga_do_cleanup_dma( drm_device_t *dev );
extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
extern int mga_do_dma_idle(drm_mga_private_t * dev_priv);
extern int mga_do_dma_reset(drm_mga_private_t * dev_priv);
extern int mga_do_engine_reset(drm_mga_private_t * dev_priv);
extern int mga_do_cleanup_dma(drm_device_t * dev);
extern void mga_do_dma_flush( drm_mga_private_t *dev_priv );
extern void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv );
extern void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv );
extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf );
extern int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf);
/* mga_state.c */
extern int mga_dma_clear( DRM_IOCTL_ARGS );
extern int mga_dma_swap( DRM_IOCTL_ARGS );
extern int mga_dma_vertex( DRM_IOCTL_ARGS );
extern int mga_dma_indices( DRM_IOCTL_ARGS );
extern int mga_dma_iload( DRM_IOCTL_ARGS );
extern int mga_dma_blit( DRM_IOCTL_ARGS );
extern int mga_getparam( DRM_IOCTL_ARGS );
extern int mga_dma_clear(DRM_IOCTL_ARGS);
extern int mga_dma_swap(DRM_IOCTL_ARGS);
extern int mga_dma_vertex(DRM_IOCTL_ARGS);
extern int mga_dma_indices(DRM_IOCTL_ARGS);
extern int mga_dma_iload(DRM_IOCTL_ARGS);
extern int mga_dma_blit(DRM_IOCTL_ARGS);
extern int mga_getparam(DRM_IOCTL_ARGS);
/* mga_warp.c */
extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv );
extern int mga_warp_init( drm_mga_private_t *dev_priv );
extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
extern int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS );
extern void mga_driver_irq_preinstall( drm_device_t *dev );
extern void mga_driver_irq_postinstall( drm_device_t *dev );
extern void mga_driver_irq_uninstall( drm_device_t *dev );
extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
extern void mga_driver_irq_preinstall(drm_device_t * dev);
extern void mga_driver_irq_postinstall(drm_device_t * dev);
extern void mga_driver_irq_uninstall(drm_device_t * dev);
#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
@ -165,7 +165,7 @@ extern void mga_driver_irq_uninstall( drm_device_t *dev );
#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
static inline u32 _MGA_READ(u32 *addr)
static inline u32 _MGA_READ(u32 * addr)
{
DRM_MEMORYBARRIER();
return *(volatile u32 *)addr;
@ -187,8 +187,6 @@ static inline u32 _MGA_READ(u32 *addr)
#define DMAREG1(r) (u8)(((r - DWGREG1) >> 2) | 0x80)
#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
/* ================================================================
* Helper macros...
*/
@ -230,7 +228,6 @@ do { \
} \
} while (0)
/* ================================================================
* Primary DMA command stream
*/
@ -315,7 +312,6 @@ do { \
write += DMA_BLOCK_SIZE; \
} while (0)
/* Buffer aging via primary DMA stream head pointer.
*/
@ -342,7 +338,6 @@ do { \
} \
} while (0)
#define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \
MGA_DWGENGSTS | \
MGA_ENDPRDMASTS)
@ -351,8 +346,6 @@ do { \
#define MGA_DMA_DEBUG 0
/* A reduced set of the mga registers.
*/
#define MGA_CRTC_INDEX 0x1fd4
@ -607,7 +600,6 @@ do { \
# define MGA_G400_WR_MAGIC (1 << 6)
# define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */
#define MGA_ILOAD_ALIGN 64
#define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1)
@ -642,10 +634,10 @@ do { \
/* Simple idle test.
*/
static __inline__ int mga_is_idle( drm_mga_private_t *dev_priv )
static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
{
u32 status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK;
return ( status == MGA_ENDPRDMASTS );
u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
return (status == MGA_ENDPRDMASTS);
}
#endif
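mga_is_idle() is the predicate behind mga_do_wait_for_idle(), whose body is not part of the hunks shown in this commit; presumably it is the same bounded usec_timeout poll that mga_do_dma_flush() uses earlier in the diff. A rough, hypothetical sketch of that pattern (wait_for_idle(), is_idle() and udelay_us() are placeholders, not the driver's names):

#include <errno.h>

/* Bounded busy-wait on an idle predicate, in the style of the
 * usec_timeout loops elsewhere in this diff.  is_idle() and udelay_us()
 * stand in for mga_is_idle() and DRM_UDELAY().
 */
int wait_for_idle(int (*is_idle)(void), void (*udelay_us)(int),
		  int usec_timeout)
{
	int i;

	for (i = 0; i < usec_timeout; i++) {
		if (is_idle())
			return 0;	/* engine has drained the primary DMA stream */
		udelay_us(1);
	}
	return -EBUSY;			/* still busy after the timeout */
}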
@ -35,27 +35,26 @@
#include "mga_drm.h"
#include "mga_drv.h"
irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS )
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
drm_mga_private_t *dev_priv =
(drm_mga_private_t *)dev->dev_private;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
int status;
status = MGA_READ( MGA_STATUS );
status = MGA_READ(MGA_STATUS);
/* VBLANK interrupt */
if ( status & MGA_VLINEPEN ) {
MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
if (status & MGA_VLINEPEN) {
MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals( dev );
drm_vbl_send_signals(dev);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
{
unsigned int cur_vblank;
int ret = 0;
@ -64,39 +63,39 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
- *sequence ) <= (1<<23) ) );
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
}
void mga_driver_irq_preinstall( drm_device_t *dev ) {
drm_mga_private_t *dev_priv =
(drm_mga_private_t *)dev->dev_private;
void mga_driver_irq_preinstall(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
/* Disable *all* interrupts */
MGA_WRITE( MGA_IEN, 0 );
MGA_WRITE(MGA_IEN, 0);
/* Clear bits if they're already high */
MGA_WRITE( MGA_ICLEAR, ~0 );
MGA_WRITE(MGA_ICLEAR, ~0);
}
void mga_driver_irq_postinstall( drm_device_t *dev ) {
drm_mga_private_t *dev_priv =
(drm_mga_private_t *)dev->dev_private;
void mga_driver_irq_postinstall(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
/* Turn on VBL interrupt */
MGA_WRITE( MGA_IEN, MGA_VLINEIEN );
MGA_WRITE(MGA_IEN, MGA_VLINEIEN);
}
void mga_driver_irq_uninstall( drm_device_t *dev ) {
drm_mga_private_t *dev_priv =
(drm_mga_private_t *)dev->dev_private;
void mga_driver_irq_uninstall(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
return;
/* Disable *all* interrupts */
MGA_WRITE( MGA_IEN, 0 );
MGA_WRITE(MGA_IEN, 0);
}
File diff suppressed because it is too large.
File diff suppressed because it is too large.
@ -33,8 +33,7 @@
#include "mga_drv.h"
#include "mga_ucode.h"
#define MGA_WARP_CODE_ALIGN 256 /* in bytes */
#define MGA_WARP_CODE_ALIGN 256 /* in bytes */
#define WARP_UCODE_SIZE( which ) \
((sizeof(which) / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN)
@ -48,125 +47,122 @@ do { \
vcbase += WARP_UCODE_SIZE( which ); \
} while (0)
static unsigned int mga_warp_g400_microcode_size( drm_mga_private_t *dev_priv )
static unsigned int mga_warp_g400_microcode_size(drm_mga_private_t * dev_priv)
{
unsigned int size;
size = ( WARP_UCODE_SIZE( warp_g400_tgz ) +
WARP_UCODE_SIZE( warp_g400_tgza ) +
WARP_UCODE_SIZE( warp_g400_tgzaf ) +
WARP_UCODE_SIZE( warp_g400_tgzf ) +
WARP_UCODE_SIZE( warp_g400_tgzs ) +
WARP_UCODE_SIZE( warp_g400_tgzsa ) +
WARP_UCODE_SIZE( warp_g400_tgzsaf ) +
WARP_UCODE_SIZE( warp_g400_tgzsf ) +
WARP_UCODE_SIZE( warp_g400_t2gz ) +
WARP_UCODE_SIZE( warp_g400_t2gza ) +
WARP_UCODE_SIZE( warp_g400_t2gzaf ) +
WARP_UCODE_SIZE( warp_g400_t2gzf ) +
WARP_UCODE_SIZE( warp_g400_t2gzs ) +
WARP_UCODE_SIZE( warp_g400_t2gzsa ) +
WARP_UCODE_SIZE( warp_g400_t2gzsaf ) +
WARP_UCODE_SIZE( warp_g400_t2gzsf ) );
size = (WARP_UCODE_SIZE(warp_g400_tgz) +
WARP_UCODE_SIZE(warp_g400_tgza) +
WARP_UCODE_SIZE(warp_g400_tgzaf) +
WARP_UCODE_SIZE(warp_g400_tgzf) +
WARP_UCODE_SIZE(warp_g400_tgzs) +
WARP_UCODE_SIZE(warp_g400_tgzsa) +
WARP_UCODE_SIZE(warp_g400_tgzsaf) +
WARP_UCODE_SIZE(warp_g400_tgzsf) +
WARP_UCODE_SIZE(warp_g400_t2gz) +
WARP_UCODE_SIZE(warp_g400_t2gza) +
WARP_UCODE_SIZE(warp_g400_t2gzaf) +
WARP_UCODE_SIZE(warp_g400_t2gzf) +
WARP_UCODE_SIZE(warp_g400_t2gzs) +
WARP_UCODE_SIZE(warp_g400_t2gzsa) +
WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
WARP_UCODE_SIZE(warp_g400_t2gzsf));
size = PAGE_ALIGN( size );
size = PAGE_ALIGN(size);
DRM_DEBUG( "G400 ucode size = %d bytes\n", size );
DRM_DEBUG("G400 ucode size = %d bytes\n", size);
return size;
}
static unsigned int mga_warp_g200_microcode_size( drm_mga_private_t *dev_priv )
static unsigned int mga_warp_g200_microcode_size(drm_mga_private_t * dev_priv)
{
unsigned int size;
size = ( WARP_UCODE_SIZE( warp_g200_tgz ) +
WARP_UCODE_SIZE( warp_g200_tgza ) +
WARP_UCODE_SIZE( warp_g200_tgzaf ) +
WARP_UCODE_SIZE( warp_g200_tgzf ) +
WARP_UCODE_SIZE( warp_g200_tgzs ) +
WARP_UCODE_SIZE( warp_g200_tgzsa ) +
WARP_UCODE_SIZE( warp_g200_tgzsaf ) +
WARP_UCODE_SIZE( warp_g200_tgzsf ) );
size = (WARP_UCODE_SIZE(warp_g200_tgz) +
WARP_UCODE_SIZE(warp_g200_tgza) +
WARP_UCODE_SIZE(warp_g200_tgzaf) +
WARP_UCODE_SIZE(warp_g200_tgzf) +
WARP_UCODE_SIZE(warp_g200_tgzs) +
WARP_UCODE_SIZE(warp_g200_tgzsa) +
WARP_UCODE_SIZE(warp_g200_tgzsaf) +
WARP_UCODE_SIZE(warp_g200_tgzsf));
size = PAGE_ALIGN( size );
size = PAGE_ALIGN(size);
DRM_DEBUG( "G200 ucode size = %d bytes\n", size );
DRM_DEBUG("G200 ucode size = %d bytes\n", size);
return size;
}
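Both size functions above are sums of WARP_UCODE_SIZE() terms; that macro, shown at the top of this file, integer-divides sizeof() the microcode array by MGA_WARP_CODE_ALIGN, adds one, and multiplies back. That rounds every image up to a 256-byte slot and reserves one extra slot whenever the image is already an exact multiple. A small worked example of the arithmetic (stand-alone and illustrative only; ucode_slot_size() is not a driver function):

#include <stdio.h>

#define ALIGN_UNIT	256	/* MGA_WARP_CODE_ALIGN, in bytes */

/* Same arithmetic as WARP_UCODE_SIZE(), but on a byte count rather
 * than sizeof() an array.
 */
static unsigned int ucode_slot_size(unsigned int nbytes)
{
	return (nbytes / ALIGN_UNIT + 1) * ALIGN_UNIT;
}

int main(void)
{
	printf("%u\n", ucode_slot_size(1000));	/* 1000/256 = 3, (3 + 1) * 256 = 1024 */
	printf("%u\n", ucode_slot_size(512));	/* already aligned: (2 + 1) * 256 = 768 */
	return 0;
}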
static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv )
static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
{
unsigned char *vcbase = dev_priv->warp->handle;
unsigned long pcbase = dev_priv->warp->offset;
unsigned int size;
size = mga_warp_g400_microcode_size( dev_priv );
if ( size > dev_priv->warp->size ) {
DRM_ERROR( "microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size );
size = mga_warp_g400_microcode_size(dev_priv);
if (size > dev_priv->warp->size) {
DRM_ERROR("microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size);
return DRM_ERR(ENOMEM);
}
memset( dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys) );
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
WARP_UCODE_INSTALL( warp_g400_tgz, MGA_WARP_TGZ );
WARP_UCODE_INSTALL( warp_g400_tgzf, MGA_WARP_TGZF );
WARP_UCODE_INSTALL( warp_g400_tgza, MGA_WARP_TGZA );
WARP_UCODE_INSTALL( warp_g400_tgzaf, MGA_WARP_TGZAF );
WARP_UCODE_INSTALL( warp_g400_tgzs, MGA_WARP_TGZS );
WARP_UCODE_INSTALL( warp_g400_tgzsf, MGA_WARP_TGZSF );
WARP_UCODE_INSTALL( warp_g400_tgzsa, MGA_WARP_TGZSA );
WARP_UCODE_INSTALL( warp_g400_tgzsaf, MGA_WARP_TGZSAF );
WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ);
WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF);
WARP_UCODE_INSTALL(warp_g400_tgza, MGA_WARP_TGZA);
WARP_UCODE_INSTALL(warp_g400_tgzaf, MGA_WARP_TGZAF);
WARP_UCODE_INSTALL(warp_g400_tgzs, MGA_WARP_TGZS);
WARP_UCODE_INSTALL(warp_g400_tgzsf, MGA_WARP_TGZSF);
WARP_UCODE_INSTALL(warp_g400_tgzsa, MGA_WARP_TGZSA);
WARP_UCODE_INSTALL(warp_g400_tgzsaf, MGA_WARP_TGZSAF);
WARP_UCODE_INSTALL( warp_g400_t2gz, MGA_WARP_T2GZ );
WARP_UCODE_INSTALL( warp_g400_t2gzf, MGA_WARP_T2GZF );
WARP_UCODE_INSTALL( warp_g400_t2gza, MGA_WARP_T2GZA );
WARP_UCODE_INSTALL( warp_g400_t2gzaf, MGA_WARP_T2GZAF );
WARP_UCODE_INSTALL( warp_g400_t2gzs, MGA_WARP_T2GZS );
WARP_UCODE_INSTALL( warp_g400_t2gzsf, MGA_WARP_T2GZSF );
WARP_UCODE_INSTALL( warp_g400_t2gzsa, MGA_WARP_T2GZSA );
WARP_UCODE_INSTALL( warp_g400_t2gzsaf, MGA_WARP_T2GZSAF );
WARP_UCODE_INSTALL(warp_g400_t2gz, MGA_WARP_T2GZ);
WARP_UCODE_INSTALL(warp_g400_t2gzf, MGA_WARP_T2GZF);
WARP_UCODE_INSTALL(warp_g400_t2gza, MGA_WARP_T2GZA);
WARP_UCODE_INSTALL(warp_g400_t2gzaf, MGA_WARP_T2GZAF);
WARP_UCODE_INSTALL(warp_g400_t2gzs, MGA_WARP_T2GZS);
WARP_UCODE_INSTALL(warp_g400_t2gzsf, MGA_WARP_T2GZSF);
WARP_UCODE_INSTALL(warp_g400_t2gzsa, MGA_WARP_T2GZSA);
WARP_UCODE_INSTALL(warp_g400_t2gzsaf, MGA_WARP_T2GZSAF);
return 0;
}
static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv)
{
unsigned char *vcbase = dev_priv->warp->handle;
unsigned long pcbase = dev_priv->warp->offset;
unsigned int size;
size = mga_warp_g200_microcode_size( dev_priv );
if ( size > dev_priv->warp->size ) {
DRM_ERROR( "microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size );
size = mga_warp_g200_microcode_size(dev_priv);
if (size > dev_priv->warp->size) {
DRM_ERROR("microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size);
return DRM_ERR(ENOMEM);
}
memset( dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys) );
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
WARP_UCODE_INSTALL( warp_g200_tgz, MGA_WARP_TGZ );
WARP_UCODE_INSTALL( warp_g200_tgzf, MGA_WARP_TGZF );
WARP_UCODE_INSTALL( warp_g200_tgza, MGA_WARP_TGZA );
WARP_UCODE_INSTALL( warp_g200_tgzaf, MGA_WARP_TGZAF );
WARP_UCODE_INSTALL( warp_g200_tgzs, MGA_WARP_TGZS );
WARP_UCODE_INSTALL( warp_g200_tgzsf, MGA_WARP_TGZSF );
WARP_UCODE_INSTALL( warp_g200_tgzsa, MGA_WARP_TGZSA );
WARP_UCODE_INSTALL( warp_g200_tgzsaf, MGA_WARP_TGZSAF );
WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
return 0;
}
int mga_warp_install_microcode( drm_mga_private_t *dev_priv )
int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
{
switch ( dev_priv->chipset ) {
switch (dev_priv->chipset) {
case MGA_CARD_TYPE_G400:
return mga_warp_install_g400_microcode( dev_priv );
return mga_warp_install_g400_microcode(dev_priv);
case MGA_CARD_TYPE_G200:
return mga_warp_install_g200_microcode( dev_priv );
return mga_warp_install_g200_microcode(dev_priv);
default:
return DRM_ERR(EINVAL);
}
@ -174,35 +170,34 @@ int mga_warp_install_microcode( drm_mga_private_t *dev_priv )
#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
int mga_warp_init( drm_mga_private_t *dev_priv )
int mga_warp_init(drm_mga_private_t * dev_priv)
{
u32 wmisc;
/* FIXME: Get rid of these damned magic numbers...
*/
switch ( dev_priv->chipset ) {
switch (dev_priv->chipset) {
case MGA_CARD_TYPE_G400:
MGA_WRITE( MGA_WIADDR2, MGA_WMODE_SUSPEND );
MGA_WRITE( MGA_WGETMSB, 0x00000E00 );
MGA_WRITE( MGA_WVRTXSZ, 0x00001807 );
MGA_WRITE( MGA_WACCEPTSEQ, 0x18000000 );
MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
MGA_WRITE(MGA_WGETMSB, 0x00000E00);
MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
break;
case MGA_CARD_TYPE_G200:
MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND );
MGA_WRITE( MGA_WGETMSB, 0x1606 );
MGA_WRITE( MGA_WVRTXSZ, 7 );
MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
MGA_WRITE(MGA_WGETMSB, 0x1606);
MGA_WRITE(MGA_WVRTXSZ, 7);
break;
default:
return DRM_ERR(EINVAL);
}
MGA_WRITE( MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
MGA_WMASTER_ENABLE |
MGA_WCACHEFLUSH_ENABLE) );
wmisc = MGA_READ( MGA_WMISC );
if ( wmisc != WMISC_EXPECTED ) {
DRM_ERROR( "WARP engine config failed! 0x%x != 0x%x\n",
wmisc, WMISC_EXPECTED );
MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
wmisc = MGA_READ(MGA_WMISC);
if (wmisc != WMISC_EXPECTED) {
DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
wmisc, WMISC_EXPECTED);
return DRM_ERR(EINVAL);
}
File diff suppressed because it is too large.
@ -93,7 +93,7 @@
#define R128_MAX_TEXTURE_LEVELS 11
#define R128_MAX_TEXTURE_UNITS 2
#endif /* __R128_SAREA_DEFINES__ */
#endif /* __R128_SAREA_DEFINES__ */
typedef struct {
/* Context state - can be written in one large chunk */
@ -140,7 +140,6 @@ typedef struct {
unsigned int tex_border_color;
} drm_r128_texture_regs_t;
typedef struct drm_r128_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex buffer.
@ -161,14 +160,13 @@ typedef struct drm_r128_sarea {
unsigned int last_frame;
unsigned int last_dispatch;
drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
unsigned int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
int pfCurrentPage; /* which buffer is being displayed? */
int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
int pfCurrentPage; /* which buffer is being displayed? */
} drm_r128_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmR128.h)
*/
@ -220,7 +218,7 @@ typedef struct drm_r128_sarea {
typedef struct drm_r128_init {
enum {
R128_INIT_CCE = 0x01,
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
@ -278,9 +276,9 @@ typedef struct drm_r128_clear {
typedef struct drm_r128_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_r128_vertex_t;
typedef struct drm_r128_indices {
@ -288,7 +286,7 @@ typedef struct drm_r128_indices {
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
int discard; /* Client finished with buffer? */
} drm_r128_indices_t;
typedef struct drm_r128_blit {
@ -302,10 +300,10 @@ typedef struct drm_r128_blit {
typedef struct drm_r128_depth {
enum {
R128_WRITE_SPAN = 0x01,
R128_WRITE_PIXELS = 0x02,
R128_READ_SPAN = 0x03,
R128_READ_PIXELS = 0x04
R128_WRITE_SPAN = 0x01,
R128_WRITE_PIXELS = 0x02,
R128_READ_SPAN = 0x03,
R128_READ_PIXELS = 0x04
} func;
int n;
int __user *x;
@ -327,7 +325,7 @@ typedef struct drm_r128_indirect {
typedef struct drm_r128_fullscreen {
enum {
R128_INIT_FULLSCREEN = 0x01,
R128_INIT_FULLSCREEN = 0x01,
R128_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_r128_fullscreen_t;
@ -46,14 +46,13 @@
#define DRIVER_MINOR 5
#define DRIVER_PATCHLEVEL 0
#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR )
typedef struct drm_r128_freelist {
unsigned int age;
drm_buf_t *buf;
struct drm_r128_freelist *next;
struct drm_r128_freelist *prev;
unsigned int age;
drm_buf_t *buf;
struct drm_r128_freelist *next;
struct drm_r128_freelist *prev;
} drm_r128_freelist_t;
typedef struct drm_r128_ring_buffer {
@ -77,8 +76,8 @@ typedef struct drm_r128_private {
int cce_fifo_size;
int cce_running;
drm_r128_freelist_t *head;
drm_r128_freelist_t *tail;
drm_r128_freelist_t *head;
drm_r128_freelist_t *tail;
int usec_timeout;
int is_pci;
@ -121,48 +120,48 @@ typedef struct drm_r128_buf_priv {
int prim;
int discard;
int dispatched;
drm_r128_freelist_t *list_entry;
drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;
/* r128_cce.c */
extern int r128_cce_init( DRM_IOCTL_ARGS );
extern int r128_cce_start( DRM_IOCTL_ARGS );
extern int r128_cce_stop( DRM_IOCTL_ARGS );
extern int r128_cce_reset( DRM_IOCTL_ARGS );
extern int r128_cce_idle( DRM_IOCTL_ARGS );
extern int r128_engine_reset( DRM_IOCTL_ARGS );
extern int r128_fullscreen( DRM_IOCTL_ARGS );
extern int r128_cce_buffers( DRM_IOCTL_ARGS );
extern int r128_getparam( DRM_IOCTL_ARGS );
extern int r128_cce_init(DRM_IOCTL_ARGS);
extern int r128_cce_start(DRM_IOCTL_ARGS);
extern int r128_cce_stop(DRM_IOCTL_ARGS);
extern int r128_cce_reset(DRM_IOCTL_ARGS);
extern int r128_cce_idle(DRM_IOCTL_ARGS);
extern int r128_engine_reset(DRM_IOCTL_ARGS);
extern int r128_fullscreen(DRM_IOCTL_ARGS);
extern int r128_cce_buffers(DRM_IOCTL_ARGS);
extern int r128_getparam(DRM_IOCTL_ARGS);
extern void r128_freelist_reset( drm_device_t *dev );
extern drm_buf_t *r128_freelist_get( drm_device_t *dev );
extern void r128_freelist_reset(drm_device_t * dev);
extern drm_buf_t *r128_freelist_get(drm_device_t * dev);
extern int r128_wait_ring( drm_r128_private_t *dev_priv, int n );
extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
extern int r128_do_cce_idle( drm_r128_private_t *dev_priv );
extern int r128_do_cleanup_cce( drm_device_t *dev );
extern int r128_do_cleanup_pageflip( drm_device_t *dev );
extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
extern int r128_do_cleanup_cce(drm_device_t * dev);
extern int r128_do_cleanup_pageflip(drm_device_t * dev);
/* r128_state.c */
extern int r128_cce_clear( DRM_IOCTL_ARGS );
extern int r128_cce_swap( DRM_IOCTL_ARGS );
extern int r128_cce_flip( DRM_IOCTL_ARGS );
extern int r128_cce_vertex( DRM_IOCTL_ARGS );
extern int r128_cce_indices( DRM_IOCTL_ARGS );
extern int r128_cce_blit( DRM_IOCTL_ARGS );
extern int r128_cce_depth( DRM_IOCTL_ARGS );
extern int r128_cce_stipple( DRM_IOCTL_ARGS );
extern int r128_cce_indirect( DRM_IOCTL_ARGS );
extern int r128_cce_clear(DRM_IOCTL_ARGS);
extern int r128_cce_swap(DRM_IOCTL_ARGS);
extern int r128_cce_flip(DRM_IOCTL_ARGS);
extern int r128_cce_vertex(DRM_IOCTL_ARGS);
extern int r128_cce_indices(DRM_IOCTL_ARGS);
extern int r128_cce_blit(DRM_IOCTL_ARGS);
extern int r128_cce_depth(DRM_IOCTL_ARGS);
extern int r128_cce_stipple(DRM_IOCTL_ARGS);
extern int r128_cce_indirect(DRM_IOCTL_ARGS);
extern int r128_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern irqreturn_t r128_driver_irq_handler( DRM_IRQ_ARGS );
extern void r128_driver_irq_preinstall( drm_device_t *dev );
extern void r128_driver_irq_postinstall( drm_device_t *dev );
extern void r128_driver_irq_uninstall( drm_device_t *dev );
extern void r128_driver_pretakedown(drm_device_t *dev);
extern void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
extern void r128_driver_irq_preinstall(drm_device_t * dev);
extern void r128_driver_irq_postinstall(drm_device_t * dev);
extern void r128_driver_irq_uninstall(drm_device_t * dev);
extern void r128_driver_pretakedown(drm_device_t * dev);
extern void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp);
/* Register definitions, register access macros and drmAddMap constants
* for Rage 128 kernel driver.
@ -271,7 +270,6 @@ extern void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp);
# define R128_EVENT_CRTC_OFFSET (1 << 0)
#define R128_WINDOW_XY_OFFSET 0x1bcc
/* CCE registers
*/
#define R128_PM4_BUFFER_OFFSET 0x0700
@ -322,7 +320,6 @@ extern void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp);
#define R128_PM4_FIFO_DATA_EVEN 0x1000
#define R128_PM4_FIFO_DATA_ODD 0x1004
/* CCE command packets
*/
#define R128_CCE_PACKET0 0x00000000
@ -400,8 +397,7 @@ do { \
R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
} while (0)
extern int R128_READ_PLL(drm_device_t *dev, int addr);
extern int R128_READ_PLL(drm_device_t * dev, int addr);
#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \
((n) << 16) | ((reg) >> 2))
@ -411,13 +407,11 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr);
#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \
(pkt) | ((n) << 16))
static __inline__ void
r128_update_ring_snapshot( drm_r128_private_t *dev_priv )
static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
ring->space = (GET_RING_HEAD( dev_priv ) - ring->tail) * sizeof(u32);
if ( ring->space <= 0 )
ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
if (ring->space <= 0)
ring->space += ring->size;
}
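r128_update_ring_snapshot() above measures CCE ring space as the byte distance from the software tail (a dword index) to the hardware read pointer, adding the ring size back whenever the difference comes out non-positive, i.e. when the head has wrapped around behind the tail or sits exactly on it. The same rule in isolation (ring_space() and the values in main() are illustrative, not the driver's code):

#include <assert.h>

/* Free bytes in the CCE ring, restating r128_update_ring_snapshot():
 * head and tail are dword indices, size is the ring size in bytes.
 */
static int ring_space(int head, int tail, int size)
{
	int space = (head - tail) * (int)sizeof(unsigned int);

	if (space <= 0)		/* head wrapped behind (or onto) the tail */
		space += size;
	return space;
}

int main(void)
{
	assert(ring_space(300, 100, 16384) == 800);	/* head 200 dwords ahead */
	assert(ring_space(100, 100, 16384) == 16384);	/* treated as a fully free ring */
	return 0;
}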
@ -458,7 +452,6 @@ do { \
OUT_RING( R128_EVENT_CRTC_OFFSET ); \
} while (0)
/* ================================================================
* Ring control
*/
@ -528,4 +521,4 @@ do { \
write &= tail_mask; \
} while (0)
#endif /* __R128_DRV_H__ */
#endif /* __R128_DRV_H__ */
@ -35,27 +35,26 @@
#include "r128_drm.h"
#include "r128_drv.h"
irqreturn_t r128_driver_irq_handler( DRM_IRQ_ARGS )
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
drm_r128_private_t *dev_priv =
(drm_r128_private_t *)dev->dev_private;
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
int status;
status = R128_READ( R128_GEN_INT_STATUS );
status = R128_READ(R128_GEN_INT_STATUS);
/* VBLANK interrupt */
if ( status & R128_CRTC_VBLANK_INT ) {
R128_WRITE( R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK );
if (status & R128_CRTC_VBLANK_INT) {
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals( dev );
drm_vbl_send_signals(dev);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int r128_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
{
unsigned int cur_vblank;
int ret = 0;
@ -64,39 +63,39 @@ int r128_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
- *sequence ) <= (1<<23) ) );
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
}
void r128_driver_irq_preinstall( drm_device_t *dev ) {
drm_r128_private_t *dev_priv =
(drm_r128_private_t *)dev->dev_private;
void r128_driver_irq_preinstall(drm_device_t * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
/* Disable *all* interrupts */
R128_WRITE( R128_GEN_INT_CNTL, 0 );
R128_WRITE(R128_GEN_INT_CNTL, 0);
/* Clear vblank bit if it's already high */
R128_WRITE( R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK );
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
void r128_driver_irq_postinstall( drm_device_t *dev ) {
drm_r128_private_t *dev_priv =
(drm_r128_private_t *)dev->dev_private;
void r128_driver_irq_postinstall(drm_device_t * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
/* Turn on VBL interrupt */
R128_WRITE( R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN );
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
}
void r128_driver_irq_uninstall( drm_device_t *dev ) {
drm_r128_private_t *dev_priv =
(drm_r128_private_t *)dev->dev_private;
void r128_driver_irq_uninstall(drm_device_t * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
return;
/* Disable *all* interrupts */
R128_WRITE( R128_GEN_INT_CNTL, 0 );
R128_WRITE(R128_GEN_INT_CNTL, 0);
}
File diff suppressed because it is too large.
File diff suppressed because it is too large.
@ -57,78 +57,77 @@
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
#define RADEON_REQUIRE_QUIESCENCE 0x00010000
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
#define RADEON_UPLOAD_ALL 0x003effff
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
/* New style per-packet identifiers for use in cmd_buffer ioctl with
* the RADEON_EMIT_PACKET command. Comments relate new packets to old
* state bits and the packet size:
*/
#define RADEON_EMIT_PP_MISC 0 /* context/7 */
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
#define R200_EMIT_VAP_CTL 32 /* vap/1 */
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
#define RADEON_EMIT_PP_MISC 0 /* context/7 */
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
#define R200_EMIT_VAP_CTL 32 /* vap/1 */
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
#define R200_EMIT_PP_CUBIC_FACES_0 61
#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
#define R200_EMIT_PP_CUBIC_FACES_1 63
@ -147,21 +146,19 @@
#define R200_EMIT_RB3D_BLENDCOLOR 76
#define RADEON_MAX_STATE_PACKETS 77
/* Commands understood by cmd_buffer ioctl. More can be added but
* obviously these can't be removed or changed:
*/
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
#define RADEON_CMD_SCALARS 2 /* emit scalar data */
#define RADEON_CMD_VECTORS 3 /* emit vector data */
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
#define RADEON_CMD_PACKET3 5 /* emit hw packet */
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
* doesn't make the cpu wait, just
* the graphics hardware */
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
#define RADEON_CMD_SCALARS 2 /* emit scalar data */
#define RADEON_CMD_VECTORS 3 /* emit vector data */
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
#define RADEON_CMD_PACKET3 5 /* emit hw packet */
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
* doesn't make the cpu wait, just
* the graphics hardware */
typedef union {
int i;
@ -188,7 +185,6 @@ typedef union {
#define RADEON_WAIT_2D 0x1
#define RADEON_WAIT_3D 0x2
#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
#define RADEON_DEPTH 0x4
@ -234,7 +230,7 @@ typedef union {
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
#endif /* __RADEON_SAREA_DEFINES__ */
#endif /* __RADEON_SAREA_DEFINES__ */
typedef struct {
unsigned int red;
@ -245,7 +241,7 @@ typedef struct {
typedef struct {
/* Context state */
unsigned int pp_misc; /* 0x1c14 */
unsigned int pp_misc; /* 0x1c14 */
unsigned int pp_fog_color;
unsigned int re_solid_color;
unsigned int rb3d_blendcntl;
@ -253,7 +249,7 @@ typedef struct {
unsigned int rb3d_depthpitch;
unsigned int rb3d_zstencilcntl;
unsigned int pp_cntl; /* 0x1c38 */
unsigned int pp_cntl; /* 0x1c38 */
unsigned int rb3d_cntl;
unsigned int rb3d_coloroffset;
unsigned int re_width_height;
@ -261,27 +257,27 @@ typedef struct {
unsigned int se_cntl;
/* Vertex format state */
unsigned int se_coord_fmt; /* 0x1c50 */
unsigned int se_coord_fmt; /* 0x1c50 */
/* Line state */
unsigned int re_line_pattern; /* 0x1cd0 */
unsigned int re_line_pattern; /* 0x1cd0 */
unsigned int re_line_state;
unsigned int se_line_width; /* 0x1db8 */
unsigned int se_line_width; /* 0x1db8 */
/* Bumpmap state */
unsigned int pp_lum_matrix; /* 0x1d00 */
unsigned int pp_lum_matrix; /* 0x1d00 */
unsigned int pp_rot_matrix_0; /* 0x1d58 */
unsigned int pp_rot_matrix_0; /* 0x1d58 */
unsigned int pp_rot_matrix_1;
/* Mask state */
unsigned int rb3d_stencilrefmask; /* 0x1d7c */
unsigned int rb3d_stencilrefmask; /* 0x1d7c */
unsigned int rb3d_ropcntl;
unsigned int rb3d_planemask;
/* Viewport state */
unsigned int se_vport_xscale; /* 0x1d98 */
unsigned int se_vport_xscale; /* 0x1d98 */
unsigned int se_vport_xoffset;
unsigned int se_vport_yscale;
unsigned int se_vport_yoffset;
@ -289,20 +285,19 @@ typedef struct {
unsigned int se_vport_zoffset;
/* Setup state */
unsigned int se_cntl_status; /* 0x2140 */
unsigned int se_cntl_status; /* 0x2140 */
/* Misc state */
unsigned int re_top_left; /* 0x26c0 */
unsigned int re_top_left; /* 0x26c0 */
unsigned int re_misc;
} drm_radeon_context_regs_t;
typedef struct {
/* Zbias state */
unsigned int se_zbias_factor; /* 0x1dac */
unsigned int se_zbias_factor; /* 0x1dac */
unsigned int se_zbias_constant;
} drm_radeon_context2_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
@ -320,11 +315,10 @@ typedef struct {
unsigned int finish;
unsigned int prim:8;
unsigned int stateidx:8;
unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
unsigned int vc_format; /* vertex format */
unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
unsigned int vc_format; /* vertex format */
} drm_radeon_prim_t;
typedef struct {
drm_radeon_context_regs_t context;
drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
@ -332,7 +326,6 @@ typedef struct {
unsigned int dirty;
} drm_radeon_state_t;
typedef struct {
/* The channel for communication of state information to the
* kernel on firing a vertex buffer with either of the
@ -355,15 +348,15 @@ typedef struct {
unsigned int last_dispatch;
unsigned int last_clear;
drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1];
drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
1];
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
int pfState; /* number of 3d windows (0,1,2ormore) */
int pfCurrentPage; /* which buffer is being displayed? */
int crtc2_base; /* CRTC2 frame offset */
int pfState; /* number of 3d windows (0,1,2ormore) */
int pfCurrentPage; /* which buffer is being displayed? */
int crtc2_base; /* CRTC2 frame offset */
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmRadeon.h)
*
@ -428,12 +421,12 @@ typedef struct {
typedef struct drm_radeon_init {
enum {
RADEON_INIT_CP = 0x01,
RADEON_INIT_CP = 0x01,
RADEON_CLEANUP_CP = 0x02,
RADEON_INIT_R200_CP = 0x03
} func;
unsigned long sarea_priv_offset;
int is_pci; /* not used, driver asks hardware */
int is_pci; /* not used, driver asks hardware */
int cp_mode;
int gart_size;
int ring_size;
@ -460,7 +453,7 @@ typedef struct drm_radeon_cp_stop {
typedef struct drm_radeon_fullscreen {
enum {
RADEON_INIT_FULLSCREEN = 0x01,
RADEON_INIT_FULLSCREEN = 0x01,
RADEON_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_radeon_fullscreen_t;
@ -481,15 +474,15 @@ typedef struct drm_radeon_clear {
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask; /* misnamed field: should be stencil */
unsigned int depth_mask; /* misnamed field: should be stencil */
drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;
typedef struct drm_radeon_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_radeon_vertex_t;
typedef struct drm_radeon_indices {
@ -497,7 +490,7 @@ typedef struct drm_radeon_indices {
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;
/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
@ -505,8 +498,8 @@ typedef struct drm_radeon_indices {
* - supports driver change to emit native primitives
*/
typedef struct drm_radeon_vertex2 {
int idx; /* Index of vertex buffer */
int discard; /* Client finished with buffer? */
int idx; /* Index of vertex buffer */
int discard; /* Client finished with buffer? */
int nr_states;
drm_radeon_state_t __user *state;
int nr_prims;
@ -531,7 +524,7 @@ typedef struct drm_radeon_cmd_buffer {
} drm_radeon_cmd_buffer_t;
typedef struct drm_radeon_tex_image {
unsigned int x, y; /* Blit coordinates */
unsigned int x, y; /* Blit coordinates */
unsigned int width, height;
const void __user *data;
} drm_radeon_tex_image_t;
@ -540,7 +533,7 @@ typedef struct drm_radeon_texture {
unsigned int offset;
int pitch;
int format;
int width; /* Texture image coordinates */
int width; /* Texture image coordinates */
int height;
drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;
@ -556,19 +549,18 @@ typedef struct drm_radeon_indirect {
int discard;
} drm_radeon_indirect_t;
/* 1.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
#define RADEON_PARAM_LAST_FRAME 2
#define RADEON_PARAM_LAST_DISPATCH 3
#define RADEON_PARAM_LAST_CLEAR 4
/* Added with DRM version 1.6. */
#define RADEON_PARAM_IRQ_NR 5
#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
/* Added with DRM version 1.8. */
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
#define RADEON_PARAM_STATUS_HANDLE 8
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10
@ -602,7 +594,6 @@ typedef struct drm_radeon_mem_init_heap {
int start;
} drm_radeon_mem_init_heap_t;
/* 1.6: Userspace can request & wait on irq's:
*/
typedef struct drm_radeon_irq_emit {
@ -613,17 +604,15 @@ typedef struct drm_radeon_irq_wait {
int irq_seq;
} drm_radeon_irq_wait_t;
/* 1.10: Clients tell the DRM where they think the framebuffer is located in
* the card's address space, via a new generic ioctl to set parameters
*/
typedef struct drm_radeon_setparam {
unsigned int param;
int64_t value;
int64_t value;
} drm_radeon_setparam_t;
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#endif
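
A sketch of how a 1.10 client would fill the setparam request declared above. Only drm_radeon_setparam_t and RADEON_SETPARAM_FB_LOCATION come from this hunk; the DRM_RADEON_SETPARAM command index and drmCommandWrite() are assumed from libdrm conventions of the same era.

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>      /* drmCommandWrite(); assumed standard libdrm header */
#include "radeon_drm.h"   /* the structures and defines shown above */

static int radeon_tell_fb_location(int fd, uint64_t fb_base)
{
	drm_radeon_setparam_t sp;

	memset(&sp, 0, sizeof(sp));
	sp.param = RADEON_SETPARAM_FB_LOCATION;  /* defined above as 1 */
	sp.value = (int64_t) fb_base;            /* framebuffer base in the card's address space */

	/* DRM_RADEON_SETPARAM is not visible in this hunk and is assumed here. */
	return drmCommandWrite(fd, DRM_RADEON_SETPARAM, &sp, sizeof(sp));
}
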

View File

@ -44,7 +44,6 @@
#define DRIVER_MINOR 11
#define DRIVER_PATCHLEVEL 0
enum radeon_family {
CHIP_R100,
CHIP_RS100,
@ -69,22 +68,22 @@ enum radeon_family {
* Chip flags
*/
enum radeon_chip_flags {
CHIP_FAMILY_MASK = 0x0000ffffUL,
CHIP_FLAGS_MASK = 0xffff0000UL,
CHIP_IS_MOBILITY = 0x00010000UL,
CHIP_IS_IGP = 0x00020000UL,
CHIP_SINGLE_CRTC = 0x00040000UL,
CHIP_IS_AGP = 0x00080000UL,
CHIP_FAMILY_MASK = 0x0000ffffUL,
CHIP_FLAGS_MASK = 0xffff0000UL,
CHIP_IS_MOBILITY = 0x00010000UL,
CHIP_IS_IGP = 0x00020000UL,
CHIP_SINGLE_CRTC = 0x00040000UL,
CHIP_IS_AGP = 0x00080000UL,
};
#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
typedef struct drm_radeon_freelist {
unsigned int age;
drm_buf_t *buf;
struct drm_radeon_freelist *next;
struct drm_radeon_freelist *prev;
unsigned int age;
drm_buf_t *buf;
struct drm_radeon_freelist *next;
struct drm_radeon_freelist *prev;
} drm_radeon_freelist_t;
typedef struct drm_radeon_ring_buffer {
@ -132,8 +131,8 @@ typedef struct drm_radeon_private {
int cp_mode;
int cp_running;
drm_radeon_freelist_t *head;
drm_radeon_freelist_t *tail;
drm_radeon_freelist_t *head;
drm_radeon_freelist_t *tail;
int last_buf;
volatile u32 *scratch;
int writeback_works;
@ -193,13 +192,13 @@ typedef struct drm_radeon_private {
struct mem_block *fb_heap;
/* SW interrupt */
wait_queue_head_t swi_queue;
atomic_t swi_emitted;
wait_queue_head_t swi_queue;
atomic_t swi_emitted;
/* starting from here on, data is preserved across an open */
uint32_t flags; /* see radeon_chip_flags */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
struct radeon_i2c_chan i2c[4];
struct radeon_i2c_chan i2c[4];
#endif
} drm_radeon_private_t;
@ -208,63 +207,66 @@ typedef struct drm_radeon_buf_priv {
} drm_radeon_buf_priv_t;
/* radeon_cp.c */
extern int radeon_cp_init( DRM_IOCTL_ARGS );
extern int radeon_cp_start( DRM_IOCTL_ARGS );
extern int radeon_cp_stop( DRM_IOCTL_ARGS );
extern int radeon_cp_reset( DRM_IOCTL_ARGS );
extern int radeon_cp_idle( DRM_IOCTL_ARGS );
extern int radeon_cp_resume( DRM_IOCTL_ARGS );
extern int radeon_engine_reset( DRM_IOCTL_ARGS );
extern int radeon_fullscreen( DRM_IOCTL_ARGS );
extern int radeon_cp_buffers( DRM_IOCTL_ARGS );
extern int radeon_cp_init(DRM_IOCTL_ARGS);
extern int radeon_cp_start(DRM_IOCTL_ARGS);
extern int radeon_cp_stop(DRM_IOCTL_ARGS);
extern int radeon_cp_reset(DRM_IOCTL_ARGS);
extern int radeon_cp_idle(DRM_IOCTL_ARGS);
extern int radeon_cp_resume(DRM_IOCTL_ARGS);
extern int radeon_engine_reset(DRM_IOCTL_ARGS);
extern int radeon_fullscreen(DRM_IOCTL_ARGS);
extern int radeon_cp_buffers(DRM_IOCTL_ARGS);
extern void radeon_freelist_reset( drm_device_t *dev );
extern drm_buf_t *radeon_freelist_get( drm_device_t *dev );
extern void radeon_freelist_reset(drm_device_t * dev);
extern drm_buf_t *radeon_freelist_get(drm_device_t * dev);
extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
extern int radeon_do_cleanup_cp( drm_device_t *dev );
extern int radeon_do_cleanup_pageflip( drm_device_t *dev );
extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
extern int radeon_do_cleanup_cp(drm_device_t * dev);
extern int radeon_do_cleanup_pageflip(drm_device_t * dev);
/* radeon_state.c */
extern int radeon_cp_clear( DRM_IOCTL_ARGS );
extern int radeon_cp_swap( DRM_IOCTL_ARGS );
extern int radeon_cp_vertex( DRM_IOCTL_ARGS );
extern int radeon_cp_indices( DRM_IOCTL_ARGS );
extern int radeon_cp_texture( DRM_IOCTL_ARGS );
extern int radeon_cp_stipple( DRM_IOCTL_ARGS );
extern int radeon_cp_indirect( DRM_IOCTL_ARGS );
extern int radeon_cp_vertex2( DRM_IOCTL_ARGS );
extern int radeon_cp_cmdbuf( DRM_IOCTL_ARGS );
extern int radeon_cp_getparam( DRM_IOCTL_ARGS );
extern int radeon_cp_setparam( DRM_IOCTL_ARGS );
extern int radeon_cp_flip( DRM_IOCTL_ARGS );
extern int radeon_cp_clear(DRM_IOCTL_ARGS);
extern int radeon_cp_swap(DRM_IOCTL_ARGS);
extern int radeon_cp_vertex(DRM_IOCTL_ARGS);
extern int radeon_cp_indices(DRM_IOCTL_ARGS);
extern int radeon_cp_texture(DRM_IOCTL_ARGS);
extern int radeon_cp_stipple(DRM_IOCTL_ARGS);
extern int radeon_cp_indirect(DRM_IOCTL_ARGS);
extern int radeon_cp_vertex2(DRM_IOCTL_ARGS);
extern int radeon_cp_cmdbuf(DRM_IOCTL_ARGS);
extern int radeon_cp_getparam(DRM_IOCTL_ARGS);
extern int radeon_cp_setparam(DRM_IOCTL_ARGS);
extern int radeon_cp_flip(DRM_IOCTL_ARGS);
extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
extern int radeon_mem_free( DRM_IOCTL_ARGS );
extern int radeon_mem_init_heap( DRM_IOCTL_ARGS );
extern void radeon_mem_takedown( struct mem_block **heap );
extern void radeon_mem_release( DRMFILE filp, struct mem_block *heap );
extern int radeon_mem_alloc(DRM_IOCTL_ARGS);
extern int radeon_mem_free(DRM_IOCTL_ARGS);
extern int radeon_mem_init_heap(DRM_IOCTL_ARGS);
extern void radeon_mem_takedown(struct mem_block **heap);
extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap);
/* radeon_irq.c */
extern int radeon_irq_emit( DRM_IOCTL_ARGS );
extern int radeon_irq_wait( DRM_IOCTL_ARGS );
extern int radeon_irq_emit(DRM_IOCTL_ARGS);
extern int radeon_irq_wait(DRM_IOCTL_ARGS);
extern int radeon_emit_and_wait_irq(drm_device_t *dev);
extern int radeon_wait_irq(drm_device_t *dev, int swi_nr);
extern int radeon_emit_irq(drm_device_t *dev);
extern int radeon_emit_and_wait_irq(drm_device_t * dev);
extern int radeon_wait_irq(drm_device_t * dev, int swi_nr);
extern int radeon_emit_irq(drm_device_t * dev);
extern void radeon_do_release(drm_device_t *dev);
extern int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern irqreturn_t radeon_driver_irq_handler( DRM_IRQ_ARGS );
extern void radeon_driver_irq_preinstall( drm_device_t *dev );
extern void radeon_driver_irq_postinstall( drm_device_t *dev );
extern void radeon_driver_irq_uninstall( drm_device_t *dev );
extern void radeon_driver_prerelease(drm_device_t *dev, DRMFILE filp);
extern void radeon_driver_pretakedown(drm_device_t *dev);
extern int radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv);
extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_priv);
extern void radeon_do_release(drm_device_t * dev);
extern int radeon_driver_vblank_wait(drm_device_t * dev,
unsigned int *sequence);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
extern void radeon_driver_irq_preinstall(drm_device_t * dev);
extern void radeon_driver_irq_postinstall(drm_device_t * dev);
extern void radeon_driver_irq_uninstall(drm_device_t * dev);
extern void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp);
extern void radeon_driver_pretakedown(drm_device_t * dev);
extern int radeon_driver_open_helper(drm_device_t * dev,
drm_file_t * filp_priv);
extern void radeon_driver_free_filp_priv(drm_device_t * dev,
drm_file_t * filp_priv);
/* Flags for stats.boxes
*/
@ -278,7 +280,7 @@ extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_pri
* for Radeon kernel driver.
*/
#define RADEON_AGP_COMMAND 0x0f60
#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config*/
#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
# define RADEON_AGP_ENABLE (1<<8)
#define RADEON_AUX_SCISSOR_CNTL 0x26f0
@ -350,7 +352,6 @@ extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_pri
? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
: RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
@ -551,7 +552,6 @@ extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_pri
# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
/* CP registers */
#define RADEON_CP_ME_RAM_ADDR 0x07d4
#define RADEON_CP_ME_RAM_RADDR 0x07d8
@ -740,11 +740,10 @@ extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_pri
#define R200_RE_POINTSIZE 0x2648
#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
#define RADEON_PP_TEX_SIZE_1 0x1d0c
#define RADEON_PP_TEX_SIZE_2 0x1d14
#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001
#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000
#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012
@ -785,10 +784,10 @@ do { \
RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) ); \
} while (0)
extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
extern int radeon_preinit( struct drm_device *dev, unsigned long flags );
extern int radeon_postinit( struct drm_device *dev, unsigned long flags );
extern int radeon_postcleanup( struct drm_device *dev );
extern int RADEON_READ_PLL(drm_device_t * dev, int addr);
extern int radeon_preinit(struct drm_device *dev, unsigned long flags);
extern int radeon_postinit(struct drm_device *dev, unsigned long flags);
extern int radeon_postcleanup(struct drm_device *dev);
#define CP_PACKET0( reg, n ) \
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
@ -801,7 +800,6 @@ extern int radeon_postcleanup( struct drm_device *dev );
#define CP_PACKET3( pkt, n ) \
(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
/* ================================================================
* Engine control helper macros
*/
@ -850,7 +848,6 @@ extern int radeon_postcleanup( struct drm_device *dev );
OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \
} while (0)
/* ================================================================
* Misc helper macros
*/
@ -892,7 +889,6 @@ do { \
OUT_RING( age ); \
} while (0)
/* ================================================================
* Ring control
*/
@ -953,7 +949,6 @@ do { \
OUT_RING( val ); \
} while (0)
#define OUT_RING_USER_TABLE( tab, sz ) do { \
int _size = (sz); \
int __user *_tab = (tab); \
@ -976,5 +971,4 @@ do { \
write &= mask; \
} while (0)
#endif /* __RADEON_DRV_H__ */
#endif /* __RADEON_DRV_H__ */
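
The CP_PACKET0 macro earlier in this header builds a type-0 CP packet: the register offset divided by four goes into the low bits and the count argument into bits 16 and up. A small worked example; the real RADEON_CP_PACKET0 opcode value lies outside this hunk, so a placeholder base is used:

#include <stdio.h>

#define CP_PACKET0_BASE    0x00000000u            /* placeholder for RADEON_CP_PACKET0 */
#define CP_PACKET0_EX(reg, n) \
	(CP_PACKET0_BASE | ((n) << 16) | ((reg) >> 2))

int main(void)
{
	/* Touch the register at RADEON_GEN_INT_CNTL (0x0040) with count field n = 0. */
	printf("0x%08x\n", CP_PACKET0_EX(0x0040, 0));   /* prints 0x00000010 */
	return 0;
}
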

View File

@ -53,31 +53,31 @@
* tied to dma at all, this is just a hangover from dri prehistory.
*/
irqreturn_t radeon_driver_irq_handler( DRM_IRQ_ARGS )
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *)dev->dev_private;
u32 stat;
(drm_radeon_private_t *) dev->dev_private;
u32 stat;
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
stat = RADEON_READ(RADEON_GEN_INT_STATUS)
& (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT);
& (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT);
if (!stat)
return IRQ_NONE;
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST) {
DRM_WAKEUP( &dev_priv->swi_queue );
DRM_WAKEUP(&dev_priv->swi_queue);
}
/* VBLANK interrupt */
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals( dev );
drm_vbl_send_signals(dev);
}
/* Acknowledge interrupts we handle */
@ -85,15 +85,15 @@ irqreturn_t radeon_driver_irq_handler( DRM_IRQ_ARGS )
return IRQ_HANDLED;
}
static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv)
static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv)
{
u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS )
& (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT);
u32 tmp = RADEON_READ(RADEON_GEN_INT_STATUS)
& (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT);
if (tmp)
RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp );
RADEON_WRITE(RADEON_GEN_INT_STATUS, tmp);
}
int radeon_emit_irq(drm_device_t *dev)
int radeon_emit_irq(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int ret;
@ -102,57 +102,55 @@ int radeon_emit_irq(drm_device_t *dev)
atomic_inc(&dev_priv->swi_emitted);
ret = atomic_read(&dev_priv->swi_emitted);
BEGIN_RING( 4 );
OUT_RING_REG( RADEON_LAST_SWI_REG, ret );
OUT_RING_REG( RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE );
BEGIN_RING(4);
OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
ADVANCE_RING();
COMMIT_RING();
COMMIT_RING();
return ret;
}
int radeon_wait_irq(drm_device_t *dev, int swi_nr)
int radeon_wait_irq(drm_device_t * dev, int swi_nr)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *)dev->dev_private;
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
int ret = 0;
if (RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr)
return 0;
if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
return 0;
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
/* This is a hack to work around mysterious freezes on certain
* systems:
*/
radeon_acknowledge_irqs( dev_priv );
radeon_acknowledge_irqs(dev_priv);
DRM_WAIT_ON( ret, dev_priv->swi_queue, 3 * DRM_HZ,
RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr );
DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
return ret;
}
int radeon_emit_and_wait_irq(drm_device_t *dev)
int radeon_emit_and_wait_irq(drm_device_t * dev)
{
return radeon_wait_irq( dev, radeon_emit_irq(dev) );
return radeon_wait_irq(dev, radeon_emit_irq(dev));
}
int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *)dev->dev_private;
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
radeon_acknowledge_irqs( dev_priv );
radeon_acknowledge_irqs(dev_priv);
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
@ -160,98 +158,97 @@ int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
- *sequence ) <= (1<<23) ) );
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
}
/* Needs the lock as it touches the ring.
*/
int radeon_irq_emit( DRM_IOCTL_ARGS )
int radeon_irq_emit(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_irq_emit_t emit;
int result;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t __user *)data,
sizeof(emit) );
DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data,
sizeof(emit));
result = radeon_emit_irq( dev );
result = radeon_emit_irq(dev);
if ( DRM_COPY_TO_USER( emit.irq_seq, &result, sizeof(int) ) ) {
DRM_ERROR( "copy_to_user\n" );
if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
}
return 0;
}
/* Doesn't need the hardware lock.
*/
int radeon_irq_wait( DRM_IOCTL_ARGS )
int radeon_irq_wait(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_irq_wait_t irqwait;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t __user*)data,
sizeof(irqwait) );
DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data,
sizeof(irqwait));
return radeon_wait_irq( dev, irqwait.irq_seq );
return radeon_wait_irq(dev, irqwait.irq_seq);
}
/* drm_dma.h hooks
*/
void radeon_driver_irq_preinstall( drm_device_t *dev ) {
void radeon_driver_irq_preinstall(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *)dev->dev_private;
(drm_radeon_private_t *) dev->dev_private;
/* Disable *all* interrupts */
RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
/* Disable *all* interrupts */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
/* Clear bits if they're already high */
radeon_acknowledge_irqs( dev_priv );
radeon_acknowledge_irqs(dev_priv);
}
void radeon_driver_irq_postinstall( drm_device_t *dev ) {
void radeon_driver_irq_postinstall(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *)dev->dev_private;
(drm_radeon_private_t *) dev->dev_private;
atomic_set(&dev_priv->swi_emitted, 0);
DRM_INIT_WAITQUEUE( &dev_priv->swi_queue );
atomic_set(&dev_priv->swi_emitted, 0);
DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
/* Turn on SW and VBL ints */
RADEON_WRITE( RADEON_GEN_INT_CNTL,
RADEON_CRTC_VBLANK_MASK |
RADEON_SW_INT_ENABLE );
RADEON_WRITE(RADEON_GEN_INT_CNTL,
RADEON_CRTC_VBLANK_MASK | RADEON_SW_INT_ENABLE);
}
void radeon_driver_irq_uninstall( drm_device_t *dev ) {
void radeon_driver_irq_uninstall(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *)dev->dev_private;
(drm_radeon_private_t *) dev->dev_private;
if (!dev_priv)
return;
/* Disable *all* interrupts */
RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}
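
Taken together, radeon_irq_emit and radeon_irq_wait give clients a fence: emit writes a fresh sequence number to RADEON_LAST_SWI_REG from the ring and hands it back through a user pointer, and wait blocks until the register catches up. A sketch of the userspace side; the irq_seq pointer member of drm_radeon_irq_emit_t, the DRM_RADEON_IRQ_* command indices, and the drmCommand*() helpers are assumptions, since only fragments of those declarations appear in this diff.

#include <xf86drm.h>      /* drmCommandWriteRead()/drmCommandWrite(); assumed libdrm */
#include "radeon_drm.h"

/* Fence the current point in the command stream, then wait for the GPU to pass it. */
static int radeon_fence_and_wait(int fd)
{
	drm_radeon_irq_emit_t emit;
	drm_radeon_irq_wait_t irqwait;
	int seq = 0;

	emit.irq_seq = &seq;                    /* kernel copies the new sequence back here */
	if (drmCommandWriteRead(fd, DRM_RADEON_IRQ_EMIT, &emit, sizeof(emit)))
		return -1;

	irqwait.irq_seq = seq;                  /* sleep until LAST_SWI_REG >= seq */
	return drmCommandWrite(fd, DRM_RADEON_IRQ_WAIT, &irqwait, sizeof(irqwait));
}
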

View File

@ -39,11 +39,12 @@
*/
static struct mem_block *split_block(struct mem_block *p, int start, int size,
DRMFILE filp )
DRMFILE filp)
{
/* Maybe cut off the start of an existing block */
if (start > p->start) {
struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS );
struct mem_block *newblock =
drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
if (!newblock)
goto out;
newblock->start = start;
@ -59,7 +60,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
/* Maybe cut off the end of an existing block */
if (size < p->size) {
struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS );
struct mem_block *newblock =
drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
if (!newblock)
goto out;
newblock->start = start + size;
@ -72,40 +74,39 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
p->size = size;
}
out:
out:
/* Our block is in the middle */
p->filp = filp;
return p;
}
static struct mem_block *alloc_block( struct mem_block *heap, int size,
int align2, DRMFILE filp )
static struct mem_block *alloc_block(struct mem_block *heap, int size,
int align2, DRMFILE filp)
{
struct mem_block *p;
int mask = (1 << align2)-1;
int mask = (1 << align2) - 1;
for (p = heap->next ; p != heap ; p = p->next) {
for (p = heap->next; p != heap; p = p->next) {
int start = (p->start + mask) & ~mask;
if (p->filp == 0 && start + size <= p->start + p->size)
return split_block( p, start, size, filp );
return split_block(p, start, size, filp);
}
return NULL;
}
static struct mem_block *find_block( struct mem_block *heap, int start )
static struct mem_block *find_block(struct mem_block *heap, int start)
{
struct mem_block *p;
for (p = heap->next ; p != heap ; p = p->next)
for (p = heap->next; p != heap; p = p->next)
if (p->start == start)
return p;
return NULL;
}
static void free_block( struct mem_block *p )
static void free_block(struct mem_block *p)
{
p->filp = NULL;
@ -117,7 +118,7 @@ static void free_block( struct mem_block *p )
p->size += q->size;
p->next = q->next;
p->next->prev = p;
drm_free(q, sizeof(*q), DRM_MEM_BUFS );
drm_free(q, sizeof(*q), DRM_MEM_BUFS);
}
if (p->prev->filp == 0) {
@ -125,7 +126,7 @@ static void free_block( struct mem_block *p )
q->size += p->size;
q->next = p->next;
q->next->prev = q;
drm_free(p, sizeof(*q), DRM_MEM_BUFS );
drm_free(p, sizeof(*q), DRM_MEM_BUFS);
}
}
@ -133,14 +134,14 @@ static void free_block( struct mem_block *p )
*/
static int init_heap(struct mem_block **heap, int start, int size)
{
struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS );
struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
if (!blocks)
return DRM_ERR(ENOMEM);
*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS );
*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
if (!*heap) {
drm_free( blocks, sizeof(*blocks), DRM_MEM_BUFS );
drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
}
@ -149,23 +150,22 @@ static int init_heap(struct mem_block **heap, int start, int size)
blocks->filp = NULL;
blocks->next = blocks->prev = *heap;
memset( *heap, 0, sizeof(**heap) );
(*heap)->filp = (DRMFILE) -1;
memset(*heap, 0, sizeof(**heap));
(*heap)->filp = (DRMFILE) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
}
/* Free all blocks associated with the releasing file.
*/
void radeon_mem_release( DRMFILE filp, struct mem_block *heap )
void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
{
struct mem_block *p;
if (!heap || !heap->next)
return;
for (p = heap->next ; p != heap ; p = p->next) {
for (p = heap->next; p != heap; p = p->next) {
if (p->filp == filp)
p->filp = NULL;
}
@ -173,46 +173,43 @@ void radeon_mem_release( DRMFILE filp, struct mem_block *heap )
/* Assumes a single contiguous range. Needs a special filp in
* 'heap' to stop it being subsumed.
*/
for (p = heap->next ; p != heap ; p = p->next) {
for (p = heap->next; p != heap; p = p->next) {
while (p->filp == 0 && p->next->filp == 0) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
p->next->prev = p;
drm_free(q, sizeof(*q),DRM_MEM_DRIVER);
drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
}
}
}
/* Shutdown.
*/
void radeon_mem_takedown( struct mem_block **heap )
void radeon_mem_takedown(struct mem_block **heap)
{
struct mem_block *p;
if (!*heap)
return;
for (p = (*heap)->next ; p != *heap ; ) {
for (p = (*heap)->next; p != *heap;) {
struct mem_block *q = p;
p = p->next;
drm_free(q, sizeof(*q),DRM_MEM_DRIVER);
drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
}
drm_free( *heap, sizeof(**heap),DRM_MEM_DRIVER );
drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
*heap = NULL;
}
/* IOCTL HANDLERS */
static struct mem_block **get_heap( drm_radeon_private_t *dev_priv,
int region )
static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
{
switch( region ) {
switch (region) {
case RADEON_MEM_REGION_GART:
return &dev_priv->gart_heap;
return &dev_priv->gart_heap;
case RADEON_MEM_REGION_FB:
return &dev_priv->fb_heap;
default:
@ -220,22 +217,22 @@ static struct mem_block **get_heap( drm_radeon_private_t *dev_priv,
}
}
int radeon_mem_alloc( DRM_IOCTL_ARGS )
int radeon_mem_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_alloc_t alloc;
struct mem_block *block, **heap;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t __user *)data,
sizeof(alloc) );
DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data,
sizeof(alloc));
heap = get_heap( dev_priv, alloc.region );
heap = get_heap(dev_priv, alloc.region);
if (!heap || !*heap)
return DRM_ERR(EFAULT);
@ -245,69 +242,66 @@ int radeon_mem_alloc( DRM_IOCTL_ARGS )
if (alloc.alignment < 12)
alloc.alignment = 12;
block = alloc_block( *heap, alloc.size, alloc.alignment,
filp );
block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
if (!block)
return DRM_ERR(ENOMEM);
if ( DRM_COPY_TO_USER( alloc.region_offset, &block->start,
sizeof(int) ) ) {
DRM_ERROR( "copy_to_user\n" );
if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
}
return 0;
}
int radeon_mem_free( DRM_IOCTL_ARGS )
int radeon_mem_free(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_free_t memfree;
struct mem_block *block, **heap;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t __user *)data,
sizeof(memfree) );
DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data,
sizeof(memfree));
heap = get_heap( dev_priv, memfree.region );
heap = get_heap(dev_priv, memfree.region);
if (!heap || !*heap)
return DRM_ERR(EFAULT);
block = find_block( *heap, memfree.region_offset );
block = find_block(*heap, memfree.region_offset);
if (!block)
return DRM_ERR(EFAULT);
if (block->filp != filp)
return DRM_ERR(EPERM);
free_block( block );
free_block(block);
return 0;
}
int radeon_mem_init_heap( DRM_IOCTL_ARGS )
int radeon_mem_init_heap(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_init_heap_t initheap;
struct mem_block **heap;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t __user *)data,
sizeof(initheap) );
DRM_COPY_FROM_USER_IOCTL(initheap,
(drm_radeon_mem_init_heap_t __user *) data,
sizeof(initheap));
heap = get_heap( dev_priv, initheap.region );
heap = get_heap(dev_priv, initheap.region);
if (!heap)
return DRM_ERR(EFAULT);
@ -316,7 +310,5 @@ int radeon_mem_init_heap( DRM_IOCTL_ARGS )
return DRM_ERR(EFAULT);
}
return init_heap( heap, initheap.start, initheap.size );
return init_heap(heap, initheap.start, initheap.size);
}
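
alloc_block above implements a first-fit search: each candidate block's start is rounded up to the next 2^align2 boundary before checking whether the request still fits, and radeon_mem_alloc clamps the alignment to at least 12, i.e. 4 KB. A tiny stand-alone illustration of that rounding step:

#include <stdio.h>

/* Same arithmetic as alloc_block(): round 'start' up to a 2^align2 boundary. */
static int align_up(int start, int align2)
{
	int mask = (1 << align2) - 1;
	return (start + mask) & ~mask;
}

int main(void)
{
	printf("%d\n", align_up(0, 12));      /* 0: already on a 4 KB boundary */
	printf("%d\n", align_up(100, 12));    /* 4096: rounded up              */
	printf("%d\n", align_up(4096, 12));   /* 4096: boundaries stay put     */
	return 0;
}
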

File diff suppressed because it is too large.

View File

@ -39,4 +39,4 @@ typedef struct {
unsigned int offset, size;
} drm_sis_fb_t;
#endif /* __SIS_DRM_H__ */
#endif /* __SIS_DRM_H__ */

View File

@ -46,14 +46,14 @@ typedef struct drm_sis_private {
memHeap_t *FBHeap;
} drm_sis_private_t;
extern int sis_fb_alloc( DRM_IOCTL_ARGS );
extern int sis_fb_free( DRM_IOCTL_ARGS );
extern int sis_ioctl_agp_init( DRM_IOCTL_ARGS );
extern int sis_ioctl_agp_alloc( DRM_IOCTL_ARGS );
extern int sis_ioctl_agp_free( DRM_IOCTL_ARGS );
extern int sis_fb_init( DRM_IOCTL_ARGS );
extern int sis_fb_alloc(DRM_IOCTL_ARGS);
extern int sis_fb_free(DRM_IOCTL_ARGS);
extern int sis_ioctl_agp_init(DRM_IOCTL_ARGS);
extern int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS);
extern int sis_ioctl_agp_free(DRM_IOCTL_ARGS);
extern int sis_fb_init(DRM_IOCTL_ARGS);
extern int sis_init_context(drm_device_t *dev, int context);
extern int sis_final_context(drm_device_t *dev, int context);
extern int sis_init_context(drm_device_t * dev, int context);
extern int sis_final_context(drm_device_t * dev, int context);
#endif

View File

@ -41,13 +41,13 @@ set_t *setInit(void)
int i;
set_t *set;
set = (set_t *)drm_alloc(sizeof(set_t), DRM_MEM_DRIVER);
set = (set_t *) drm_alloc(sizeof(set_t), DRM_MEM_DRIVER);
if (set != NULL) {
for (i = 0; i < SET_SIZE; i++) {
set->list[i].free_next = i + 1;
set->list[i].alloc_next = -1;
}
set->list[SET_SIZE-1].free_next = -1;
set->list[SET_SIZE - 1].free_next = -1;
set->free = 0;
set->alloc = -1;
set->trace = -1;
@ -55,7 +55,7 @@ set_t *setInit(void)
return set;
}
int setAdd(set_t *set, ITEM_TYPE item)
int setAdd(set_t * set, ITEM_TYPE item)
{
int free = set->free;
@ -73,7 +73,7 @@ int setAdd(set_t *set, ITEM_TYPE item)
return 1;
}
int setDel(set_t *set, ITEM_TYPE item)
int setDel(set_t * set, ITEM_TYPE item)
{
int alloc = set->alloc;
int prev = -1;
@ -103,7 +103,7 @@ int setDel(set_t *set, ITEM_TYPE item)
/* setFirst -> setAdd -> setNext is wrong */
int setFirst(set_t *set, ITEM_TYPE *item)
int setFirst(set_t * set, ITEM_TYPE * item)
{
if (set->alloc == -1)
return 0;
@ -114,7 +114,7 @@ int setFirst(set_t *set, ITEM_TYPE *item)
return 1;
}
int setNext(set_t *set, ITEM_TYPE *item)
int setNext(set_t * set, ITEM_TYPE * item)
{
if (set->trace == -1)
return 0;
@ -125,7 +125,7 @@ int setNext(set_t *set, ITEM_TYPE *item)
return 1;
}
int setDestroy(set_t *set)
int setDestroy(set_t * set)
{
drm_free(set, sizeof(set_t), DRM_MEM_DRIVER);
@ -158,26 +158,25 @@ int setDestroy(set_t *set)
#define ISFREE(bptr) ((bptr)->free)
memHeap_t *mmInit(int ofs,
int size)
memHeap_t *mmInit(int ofs, int size)
{
PMemBlock blocks;
if (size <= 0)
return NULL;
blocks = (TMemBlock *)drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
blocks = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
if (blocks != NULL) {
blocks->ofs = ofs;
blocks->size = size;
blocks->free = 1;
return (memHeap_t *)blocks;
return (memHeap_t *) blocks;
} else
return NULL;
}
/* Checks if a pointer 'b' is part of the heap 'heap' */
int mmBlockInHeap(memHeap_t *heap, PMemBlock b)
int mmBlockInHeap(memHeap_t * heap, PMemBlock b)
{
TMemBlock *p;
@ -196,12 +195,10 @@ int mmBlockInHeap(memHeap_t *heap, PMemBlock b)
/* Kludgey workaround for existing i810 server. Remove soon.
*/
memHeap_t *mmAddRange( memHeap_t *heap,
int ofs,
int size )
memHeap_t *mmAddRange(memHeap_t * heap, int ofs, int size)
{
PMemBlock blocks;
blocks = (TMemBlock *)drm_calloc(2, sizeof(TMemBlock), DRM_MEM_DRIVER);
blocks = (TMemBlock *) drm_calloc(2, sizeof(TMemBlock), DRM_MEM_DRIVER);
if (blocks != NULL) {
blocks[0].size = size;
blocks[0].free = 1;
@ -213,14 +210,14 @@ memHeap_t *mmAddRange( memHeap_t *heap,
*/
blocks[1].size = 0;
blocks[1].free = 0;
blocks[1].ofs = ofs+size;
blocks[1].next = (PMemBlock)heap;
return (memHeap_t *)blocks;
blocks[1].ofs = ofs + size;
blocks[1].next = (PMemBlock) heap;
return (memHeap_t *) blocks;
} else
return heap;
}
static TMemBlock* SliceBlock(TMemBlock *p,
static TMemBlock *SliceBlock(TMemBlock * p,
int startofs, int size,
int reserved, int alignment)
{
@ -228,8 +225,8 @@ static TMemBlock* SliceBlock(TMemBlock *p,
/* break left */
if (startofs > p->ofs) {
newblock = (TMemBlock*) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock->ofs = startofs;
newblock->size = p->size - (startofs - p->ofs);
newblock->free = 1;
@ -241,8 +238,8 @@ static TMemBlock* SliceBlock(TMemBlock *p,
/* break right */
if (size < p->size) {
newblock = (TMemBlock*) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock->ofs = startofs + size;
newblock->size = p->size - size;
newblock->free = 1;
@ -258,37 +255,37 @@ static TMemBlock* SliceBlock(TMemBlock *p,
return p;
}
PMemBlock mmAllocMem( memHeap_t *heap, int size, int align2, int startSearch)
PMemBlock mmAllocMem(memHeap_t * heap, int size, int align2, int startSearch)
{
int mask,startofs, endofs;
int mask, startofs, endofs;
TMemBlock *p;
if (heap == NULL || align2 < 0 || size <= 0)
return NULL;
mask = (1 << align2)-1;
mask = (1 << align2) - 1;
startofs = 0;
p = (TMemBlock *)heap;
p = (TMemBlock *) heap;
while (p != NULL) {
if (ISFREE(p)) {
startofs = (p->ofs + mask) & ~mask;
if ( startofs < startSearch ) {
if (startofs < startSearch) {
startofs = startSearch;
}
endofs = startofs+size;
if (endofs <= (p->ofs+p->size))
endofs = startofs + size;
if (endofs <= (p->ofs + p->size))
break;
}
p = p->next;
}
if (p == NULL)
return NULL;
p = SliceBlock(p,startofs,size,0,mask+1);
p = SliceBlock(p, startofs, size, 0, mask + 1);
p->heap = heap;
return p;
}
static __inline__ int Join2Blocks(TMemBlock *p)
static __inline__ int Join2Blocks(TMemBlock * p)
{
if (p->free && p->next && p->next->free) {
TMemBlock *q = p->next;
@ -321,11 +318,11 @@ int mmFreeMem(PMemBlock b)
p->free = 1;
Join2Blocks(p);
if (prev)
Join2Blocks(prev);
Join2Blocks(prev);
return 0;
}
int mmReserveMem(memHeap_t *heap, int offset,int size)
int mmReserveMem(memHeap_t * heap, int offset, int size)
{
int endofs;
TMemBlock *p;
@ -334,10 +331,10 @@ int mmReserveMem(memHeap_t *heap, int offset,int size)
return -1;
endofs = offset + size;
p = (TMemBlock *)heap;
p = (TMemBlock *) heap;
while (p && p->ofs <= offset) {
if (ISFREE(p) && endofs <= (p->ofs+p->size)) {
SliceBlock(p,offset,size,1,1);
if (ISFREE(p) && endofs <= (p->ofs + p->size)) {
SliceBlock(p, offset, size, 1, 1);
return 0;
}
p = p->next;
@ -345,14 +342,14 @@ int mmReserveMem(memHeap_t *heap, int offset,int size)
return -1;
}
int mmFreeReserved(memHeap_t *heap, int offset)
int mmFreeReserved(memHeap_t * heap, int offset)
{
TMemBlock *p,*prev;
TMemBlock *p, *prev;
if (heap == NULL)
return -1;
p = (TMemBlock *)heap;
p = (TMemBlock *) heap;
prev = NULL;
while (p != NULL && p->ofs != offset) {
prev = p;
@ -369,14 +366,14 @@ int mmFreeReserved(memHeap_t *heap, int offset)
return 0;
}
void mmDestroy(memHeap_t *heap)
void mmDestroy(memHeap_t * heap)
{
TMemBlock *p,*q;
TMemBlock *p, *q;
if (heap == NULL)
return;
p = (TMemBlock *)heap;
p = (TMemBlock *) heap;
while (p != NULL) {
q = p->next;
drm_free(p, sizeof(TMemBlock), DRM_MEM_DRIVER);

View File

@ -50,11 +50,11 @@ typedef struct {
} set_t;
set_t *setInit(void);
int setAdd(set_t *set, ITEM_TYPE item);
int setDel(set_t *set, ITEM_TYPE item);
int setFirst(set_t *set, ITEM_TYPE *item);
int setNext(set_t *set, ITEM_TYPE *item);
int setDestroy(set_t *set);
int setAdd(set_t * set, ITEM_TYPE item);
int setDel(set_t * set, ITEM_TYPE item);
int setFirst(set_t * set, ITEM_TYPE * item);
int setNext(set_t * set, ITEM_TYPE * item);
int setDestroy(set_t * set);
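
A short usage sketch for the set_t helpers declared above, illustration only: ITEM_TYPE is treated as an allocation handle, the way sis_mm.c uses it, and no setAdd() happens between setFirst() and setNext(), which the comment in sis_ds.c warns against.

#include "sis_ds.h"       /* the declarations above */

static void set_usage_sketch(void)
{
	set_t *s = setInit();
	ITEM_TYPE item;

	setAdd(s, 0x10000);               /* record two allocation handles       */
	setAdd(s, 0x20000);

	if (setFirst(s, &item)) {         /* walk every recorded handle; no      */
		do {                      /* setAdd() is allowed inside the walk */
			/* ... release the allocation behind 'item' ... */
		} while (setNext(s, &item));
	}

	setDel(s, 0x20000);               /* entries can also be removed one by one */
	setDestroy(s);
}
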
/*
* GLX Hardware Device Driver common code
@ -83,7 +83,7 @@ int setDestroy(set_t *set);
struct mem_block_t {
struct mem_block_t *next;
struct mem_block_t *heap;
int ofs,size;
int ofs, size;
int align;
int free:1;
int reserved:1;
@ -113,11 +113,9 @@ static __inline__ void mmMarkReserved(PMemBlock b)
* input: total size in bytes
* return: a heap pointer if OK, NULL if error
*/
memHeap_t *mmInit( int ofs, int size );
memHeap_t *mmInit(int ofs, int size);
memHeap_t *mmAddRange( memHeap_t *heap,
int ofs,
int size );
memHeap_t *mmAddRange(memHeap_t * heap, int ofs, int size);
/*
* Allocate 'size' bytes with 2^align2 bytes alignment,
@ -129,19 +127,19 @@ memHeap_t *mmAddRange( memHeap_t *heap,
* startSearch = linear offset from start of heap to begin search
* return: pointer to the allocated block, 0 if error
*/
PMemBlock mmAllocMem( memHeap_t *heap, int size, int align2, int startSearch );
PMemBlock mmAllocMem(memHeap_t * heap, int size, int align2, int startSearch);
/*
* Returns 1 if the block 'b' is part of the heap 'heap'
*/
int mmBlockInHeap( PMemBlock heap, PMemBlock b );
int mmBlockInHeap(PMemBlock heap, PMemBlock b);
/*
* Free block starts at offset
* input: pointer to a block
* return: 0 if OK, -1 if error
*/
int mmFreeMem( PMemBlock b );
int mmFreeMem(PMemBlock b);
/*
* Reserve 'size' bytes block start at offset
@ -150,15 +148,15 @@ int mmFreeMem( PMemBlock b );
* input: size, offset
* output: 0 if OK, -1 if error
*/
int mmReserveMem( memHeap_t *heap, int offset,int size );
int mmFreeReserved( memHeap_t *heap, int offset );
int mmReserveMem(memHeap_t * heap, int offset, int size);
int mmFreeReserved(memHeap_t * heap, int offset);
/*
* destroy MM
*/
void mmDestroy( memHeap_t *mmInit );
void mmDestroy(memHeap_t * mmInit);
/* For debugging purposes. */
void mmDumpMemInfo( memHeap_t *mmInit );
void mmDumpMemInfo(memHeap_t * mmInit);
#endif /* __SIS_DS_H__ */
#endif /* __SIS_DS_H__ */
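
A matching usage sketch for the memory-manager half of this header, with made-up offsets and sizes; only the functions declared above are used:

#include "sis_ds.h"

static void mm_usage_sketch(void)
{
	memHeap_t *heap = mmInit(0, 64 * 1024 * 1024);   /* manage a 64 MB range at offset 0 */
	PMemBlock blk;

	mmReserveMem(heap, 0, 4096);                     /* keep the first page from clients */

	blk = mmAllocMem(heap, 8192, 12, 0);             /* 8 KB, 2^12-byte aligned, search from 0 */
	if (blk != NULL && mmBlockInHeap(heap, blk))
		mmFreeMem(blk);                          /* neighbouring free blocks coalesce */

	mmFreeReserved(heap, 0);
	mmDestroy(heap);
}
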

View File

@ -28,10 +28,6 @@
*
*/
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"
#include "sis_ds.h"
#if defined(__linux__) && defined(CONFIG_FB_SIS)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <video/sisfb.h>
@ -39,6 +35,10 @@
#include <linux/sisfb.h>
#endif
#endif
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"
#include "sis_ds.h"
#define MAX_CONTEXT 100
#define VIDEO_TYPE 0
@ -47,19 +47,17 @@
typedef struct {
int used;
int context;
set_t *sets[2]; /* 0 for video, 1 for AGP */
set_t *sets[2]; /* 0 for video, 1 for AGP */
} sis_context_t;
static sis_context_t global_ppriv[MAX_CONTEXT];
static int add_alloc_set(int context, int type, unsigned int val)
{
int i, retval = 0;
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used && global_ppriv[i].context == context)
{
if (global_ppriv[i].used && global_ppriv[i].context == context) {
retval = setAdd(global_ppriv[i].sets[type], val);
break;
}
@ -72,8 +70,7 @@ static int del_alloc_set(int context, int type, unsigned int val)
int i, retval = 0;
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used && global_ppriv[i].context == context)
{
if (global_ppriv[i].used && global_ppriv[i].context == context) {
retval = setDel(global_ppriv[i].sets[type], val);
break;
}
@ -84,12 +81,12 @@ static int del_alloc_set(int context, int type, unsigned int val)
/* fb management via fb device */
#if defined(__linux__) && defined(CONFIG_FB_SIS)
int sis_fb_init( DRM_IOCTL_ARGS )
int sis_fb_init(DRM_IOCTL_ARGS)
{
return 0;
}
int sis_fb_alloc( DRM_IOCTL_ARGS )
int sis_fb_alloc(DRM_IOCTL_ARGS)
{
drm_sis_mem_t fb;
struct sis_memreq req;
@ -122,12 +119,12 @@ int sis_fb_alloc( DRM_IOCTL_ARGS )
return retval;
}
int sis_fb_free( DRM_IOCTL_ARGS )
int sis_fb_free(DRM_IOCTL_ARGS)
{
drm_sis_mem_t fb;
int retval = 0;
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *)data, sizeof(fb));
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));
if (!fb.free)
return DRM_ERR(EINVAL);
@ -153,17 +150,17 @@ int sis_fb_free( DRM_IOCTL_ARGS )
* X driver/sisfb HW- Command-
* framebuffer memory DRI heap Cursor queue
*/
int sis_fb_init( DRM_IOCTL_ARGS )
int sis_fb_init(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t fb;
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *)data, sizeof(fb));
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb));
if (dev_priv == NULL) {
dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
DRM_MEM_DRIVER);
DRM_MEM_DRIVER);
dev_priv = dev->dev_private;
if (dev_priv == NULL)
return ENOMEM;
@ -179,7 +176,7 @@ int sis_fb_init( DRM_IOCTL_ARGS )
return 0;
}
int sis_fb_alloc( DRM_IOCTL_ARGS )
int sis_fb_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
@ -200,7 +197,7 @@ int sis_fb_alloc( DRM_IOCTL_ARGS )
fb.free = (unsigned long)block;
if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) {
DRM_DEBUG("adding to allocation set fails\n");
mmFreeMem((PMemBlock)fb.free);
mmFreeMem((PMemBlock) fb.free);
retval = DRM_ERR(EINVAL);
}
} else {
@ -216,7 +213,7 @@ int sis_fb_alloc( DRM_IOCTL_ARGS )
return retval;
}
int sis_fb_free( DRM_IOCTL_ARGS )
int sis_fb_free(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
@ -225,14 +222,14 @@ int sis_fb_free( DRM_IOCTL_ARGS )
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *)data, sizeof(fb));
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));
if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock)fb.free))
if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb.free))
return DRM_ERR(EINVAL);
if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
return DRM_ERR(EINVAL);
mmFreeMem((PMemBlock)fb.free);
mmFreeMem((PMemBlock) fb.free);
DRM_DEBUG("free fb, free = 0x%lx\n", fb.free);
@ -243,7 +240,7 @@ int sis_fb_free( DRM_IOCTL_ARGS )
/* agp memory management */
int sis_ioctl_agp_init( DRM_IOCTL_ARGS )
int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
@ -251,7 +248,7 @@ int sis_ioctl_agp_init( DRM_IOCTL_ARGS )
if (dev_priv == NULL) {
dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
DRM_MEM_DRIVER);
DRM_MEM_DRIVER);
dev_priv = dev->dev_private;
if (dev_priv == NULL)
return ENOMEM;
@ -260,7 +257,8 @@ int sis_ioctl_agp_init( DRM_IOCTL_ARGS )
if (dev_priv->AGPHeap != NULL)
return DRM_ERR(EINVAL);
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *)data, sizeof(agp));
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
sizeof(agp));
dev_priv->AGPHeap = mmInit(agp.offset, agp.size);
@ -269,7 +267,7 @@ int sis_ioctl_agp_init( DRM_IOCTL_ARGS )
return 0;
}
int sis_ioctl_agp_alloc( DRM_IOCTL_ARGS )
int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
@ -290,7 +288,7 @@ int sis_ioctl_agp_alloc( DRM_IOCTL_ARGS )
agp.free = (unsigned long)block;
if (!add_alloc_set(agp.context, AGP_TYPE, agp.free)) {
DRM_DEBUG("adding to allocation set fails\n");
mmFreeMem((PMemBlock)agp.free);
mmFreeMem((PMemBlock) agp.free);
retval = -1;
}
} else {
@ -306,7 +304,7 @@ int sis_ioctl_agp_alloc( DRM_IOCTL_ARGS )
return retval;
}
int sis_ioctl_agp_free( DRM_IOCTL_ARGS )
int sis_ioctl_agp_free(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
@ -315,12 +313,13 @@ int sis_ioctl_agp_free( DRM_IOCTL_ARGS )
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *)data, sizeof(agp));
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *) data,
sizeof(agp));
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock)agp.free))
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp.free))
return DRM_ERR(EINVAL);
mmFreeMem((PMemBlock)agp.free);
mmFreeMem((PMemBlock) agp.free);
if (!del_alloc_set(agp.context, AGP_TYPE, agp.free))
return DRM_ERR(EINVAL);
@ -333,27 +332,26 @@ int sis_init_context(struct drm_device *dev, int context)
{
int i;
for (i = 0; i < MAX_CONTEXT ; i++) {
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
}
if (i >= MAX_CONTEXT) {
for (i = 0; i < MAX_CONTEXT ; i++) {
for (i = 0; i < MAX_CONTEXT; i++) {
if (!global_ppriv[i].used) {
global_ppriv[i].context = context;
global_ppriv[i].used = 1;
global_ppriv[i].sets[0] = setInit();
global_ppriv[i].sets[1] = setInit();
DRM_DEBUG("init allocation set, socket=%d, "
"context = %d\n", i, context);
"context = %d\n", i, context);
break;
}
}
if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
(global_ppriv[i].sets[1] == NULL))
{
(global_ppriv[i].sets[1] == NULL)) {
return 0;
}
}
@ -365,7 +363,7 @@ int sis_final_context(struct drm_device *dev, int context)
{
int i;
for (i=0; i<MAX_CONTEXT; i++) {
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
@ -386,7 +384,7 @@ int sis_final_context(struct drm_device *dev, int context)
#if defined(__linux__) && defined(CONFIG_FB_SIS)
sis_free(item);
#else
mmFreeMem((PMemBlock)item);
mmFreeMem((PMemBlock) item);
#endif
retval = setNext(set, &item);
}
@ -397,13 +395,13 @@ int sis_final_context(struct drm_device *dev, int context)
retval = setFirst(set, &item);
while (retval) {
DRM_DEBUG("free agp memory 0x%lx\n", item);
mmFreeMem((PMemBlock)item);
mmFreeMem((PMemBlock) item);
retval = setNext(set, &item);
}
setDestroy(set);
global_ppriv[i].used = 0;
}
}
return 1;
}
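
The sis allocation sets above exist so that video and AGP blocks leaked by a dying client can still be reclaimed in sis_final_context(). Below is a minimal user-space model of that bookkeeping, illustrative only: malloc()/free() stand in for the driver's heap allocator and a fixed-size table stands in for the set helpers.

/* Illustrative model only: per-context tracking of outstanding allocations,
 * so anything the client forgot to free is reclaimed at teardown.
 * malloc()/free() stand in for the driver's heap alloc/free calls. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_BLOCKS 32

struct ctx_tracker {
	void *blocks[MAX_BLOCKS];	/* the "allocation set" */
	int count;
};

static void *ctx_alloc(struct ctx_tracker *ctx, size_t size)
{
	void *blk;

	if (ctx->count >= MAX_BLOCKS)
		return NULL;
	blk = malloc(size);
	if (blk)
		ctx->blocks[ctx->count++] = blk;	/* add_alloc_set() analogue */
	return blk;
}

static void ctx_final(struct ctx_tracker *ctx)
{
	int i;

	for (i = 0; i < ctx->count; i++) {	/* setFirst()/setNext() walk */
		printf("reclaiming leaked block %p\n", ctx->blocks[i]);
		free(ctx->blocks[i]);		/* mmFreeMem() analogue */
	}
	ctx->count = 0;
}

int main(void)
{
	struct ctx_tracker ctx = { {0}, 0 };

	ctx_alloc(&ctx, 64);	/* client allocates ...              */
	ctx_alloc(&ctx, 128);	/* ... and exits without freeing     */
	ctx_final(&ctx);	/* context teardown reclaims both    */
	return 0;
}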

View File

@ -30,10 +30,6 @@
#ifndef __TDFX_H__
#define __TDFX_H__
/* This remains constant for all DRM template files.
*/
#define DRM(x) tdfx_##x
/* General customization:
*/

View File

@ -174,7 +174,7 @@
/* Command
* Command A
*/
#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000*/
#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
#define HC_HE3Fire_MASK 0x00100000
#define HC_HPMType_MASK 0x000f0000
#define HC_HEFlag_MASK 0x0000e000

View File

@ -21,14 +21,14 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv);
static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
static int via_wait_idle(drm_via_private_t * dev_priv);
static int via_wait_idle(drm_via_private_t * dev_priv);
static inline int
via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t cur_addr, hw_addr, next_addr;
volatile uint32_t * hw_addr_ptr;
volatile uint32_t *hw_addr_ptr;
uint32_t count;
hw_addr_ptr = dev_priv->hw_addr_ptr;
cur_addr = agp_base + dev_priv->dma_low;
@ -38,12 +38,13 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
* a large 64KB window between buffer head and tail.
*/
next_addr = cur_addr + size + 64 * 1024;
count = 1000000; /* How long is this? */
count = 1000000; /* How long is this? */
do {
hw_addr = *hw_addr_ptr;
if (count-- == 0) {
DRM_ERROR("via_cmdbuf_wait timed out hw %x dma_low %x\n",
hw_addr, dev_priv->dma_low);
DRM_ERROR
("via_cmdbuf_wait timed out hw %x dma_low %x\n",
hw_addr, dev_priv->dma_low);
return -1;
}
} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
@ -56,8 +57,8 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
*
* Returns virtual pointer to ring buffer.
*/
static inline uint32_t *
via_check_dma(drm_via_private_t * dev_priv, unsigned int size)
static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
unsigned int size)
{
if ((dev_priv->dma_low + size + 0x400) > dev_priv->dma_high) {
via_cmdbuf_rewind(dev_priv);
@ -66,19 +67,19 @@ via_check_dma(drm_via_private_t * dev_priv, unsigned int size)
return NULL;
}
return (uint32_t*)(dev_priv->dma_ptr + dev_priv->dma_low);
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
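
via_check_dma() above hands back a write pointer at dma_ptr + dma_low and forces a rewind when fewer than size + 0x400 bytes remain below dma_high. The stand-alone sketch below models how a caller fills the ring one quadword (register dword plus value dword) at a time; the helper names and the 4 KB ring size are illustrative, not taken from this commit.

/* User-space model of the ring write path: grab a pointer only if "size"
 * bytes plus a 0x400-byte guard still fit, otherwise signal that the
 * buffer must rewind (via_cmdbuf_rewind() in the driver). */
#include <stdint.h>
#include <stdio.h>

#define RING_BYTES  4096u		/* stands in for dma_high          */
#define GUARD_BYTES 0x400u		/* same safety margin as the check */

static uint32_t ring[RING_BYTES / sizeof(uint32_t)];
static unsigned int dma_low;		/* next free byte, as in the driver */

static uint32_t *check_dma(unsigned int size)
{
	if (dma_low + size + GUARD_BYTES > RING_BYTES)
		return NULL;		/* driver would rewind or wait here */
	return ring + dma_low / sizeof(uint32_t);
}

static int emit_pair(uint32_t header, uint32_t data)
{
	uint32_t *vb = check_dma(8);

	if (vb == NULL)
		return -1;
	*vb++ = header;			/* e.g. a register-index dword */
	*vb = data;			/* the value dword              */
	dma_low += 8;			/* one quadword consumed        */
	return 0;
}

int main(void)
{
	int emitted = 0;

	while (emit_pair(0xF0000000u, (uint32_t) emitted) == 0)
		emitted++;
	printf("guard hit after %d pairs (dma_low = %u)\n", emitted, dma_low);
	return 0;
}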
int via_dma_cleanup(drm_device_t *dev)
int via_dma_cleanup(drm_device_t * dev)
{
if (dev->dev_private) {
drm_via_private_t *dev_priv =
(drm_via_private_t *) dev->dev_private;
(drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start) {
via_cmdbuf_reset(dev_priv);
drm_core_ioremapfree( &dev_priv->ring.map, dev);
drm_core_ioremapfree(&dev_priv->ring.map, dev);
dev_priv->ring.virtual_start = NULL;
}
}
@ -86,10 +87,9 @@ int via_dma_cleanup(drm_device_t *dev)
return 0;
}
static int via_initialize(drm_device_t *dev,
drm_via_private_t *dev_priv,
drm_via_dma_init_t *init)
static int via_initialize(drm_device_t * dev,
drm_via_private_t * dev_priv,
drm_via_dma_init_t * init)
{
if (!dev_priv || !dev_priv->mmio) {
DRM_ERROR("via_dma_init called before via_map_init\n");
@ -98,7 +98,7 @@ static int via_initialize(drm_device_t *dev,
if (dev_priv->ring.virtual_start != NULL) {
DRM_ERROR("%s called again without calling cleanup\n",
__FUNCTION__);
__FUNCTION__);
return DRM_ERR(EFAULT);
}
@ -108,12 +108,12 @@ static int via_initialize(drm_device_t *dev,
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
drm_core_ioremap( &dev_priv->ring.map, dev );
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
via_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
" ring buffer\n");
return DRM_ERR(ENOMEM);
}
@ -131,17 +131,17 @@ static int via_initialize(drm_device_t *dev,
return 0;
}
int via_dma_init( DRM_IOCTL_ARGS )
int via_dma_init(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_dma_init_t init;
int retcode = 0;
DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *)data, sizeof(init));
DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *) data,
sizeof(init));
switch(init.func) {
switch (init.func) {
case VIA_INIT_DMA:
retcode = via_initialize(dev, dev_priv, &init);
break;
@ -156,12 +156,10 @@ int via_dma_init( DRM_IOCTL_ARGS )
return retcode;
}
static int via_dispatch_cmdbuffer(drm_device_t *dev,
drm_via_cmdbuffer_t *cmd )
static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv = dev->dev_private;
uint32_t * vb;
uint32_t *vb;
vb = via_check_dma(dev_priv, cmd->size);
if (vb == NULL) {
return DRM_ERR(EAGAIN);
@ -175,8 +173,7 @@ static int via_dispatch_cmdbuffer(drm_device_t *dev,
return 0;
}
static int via_quiescent(drm_device_t *dev)
static int via_quiescent(drm_device_t * dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
@ -186,12 +183,11 @@ static int via_quiescent(drm_device_t *dev)
return 0;
}
int via_flush_ioctl( DRM_IOCTL_ARGS )
int via_flush_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("via_flush_ioctl called without lock held\n");
return DRM_ERR(EINVAL);
}
@ -199,24 +195,23 @@ int via_flush_ioctl( DRM_IOCTL_ARGS )
return via_quiescent(dev);
}
int via_cmdbuffer( DRM_IOCTL_ARGS )
int via_cmdbuffer(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_cmdbuffer_t cmdbuf;
int ret;
DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_via_cmdbuffer_t *)data,
sizeof(cmdbuf) );
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
sizeof(cmdbuf));
DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("via_cmdbuffer called without lock held\n");
return DRM_ERR(EINVAL);
}
ret = via_dispatch_cmdbuffer( dev, &cmdbuf );
ret = via_dispatch_cmdbuffer(dev, &cmdbuf);
if (ret) {
return ret;
}
@ -224,39 +219,39 @@ int via_cmdbuffer( DRM_IOCTL_ARGS )
return 0;
}
static int via_parse_pci_cmdbuffer( drm_device_t *dev, const char *buf,
unsigned int size )
static int via_parse_pci_cmdbuffer(drm_device_t * dev, const char *buf,
unsigned int size)
{
drm_via_private_t *dev_priv = dev->dev_private;
uint32_t offset, value;
const uint32_t *regbuf = (uint32_t *)buf;
const uint32_t *regbuf = (uint32_t *) buf;
unsigned int i;
size >>=3 ;
for (i=0; i<size; ++i) {
size >>= 3;
for (i = 0; i < size; ++i) {
offset = *regbuf;
regbuf += 2;
if ((offset > ((0x7FF >> 2) | VIA_2D_CMD)) &&
(offset < ((0xC00 >> 2) | VIA_2D_CMD)) ) {
(offset < ((0xC00 >> 2) | VIA_2D_CMD))) {
DRM_DEBUG("Attempt to access Burst Command Area.\n");
return DRM_ERR( EINVAL );
return DRM_ERR(EINVAL);
} else if (offset > ((0xDFF >> 2) | VIA_2D_CMD)) {
DRM_DEBUG("Attempt to access DMA or VGA registers.\n");
return DRM_ERR( EINVAL );
return DRM_ERR(EINVAL);
}
}
regbuf = (uint32_t *)buf;
for ( i=0; i<size; ++i ) {
regbuf = (uint32_t *) buf;
for (i = 0; i < size; ++i) {
offset = (*regbuf++ & ~VIA_2D_CMD) << 2;
value = *regbuf++;
VIA_WRITE( offset, value );
VIA_WRITE(offset, value);
}
return 0;
}
static int via_dispatch_pci_cmdbuffer(drm_device_t *dev,
drm_via_cmdbuffer_t *cmd )
static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv = dev->dev_private;
char *hugebuf;
@ -269,42 +264,42 @@ static int via_dispatch_pci_cmdbuffer(drm_device_t *dev,
* Small buffers must, on the other hand be handled fast.
*/
if ( cmd->size > VIA_MAX_PCI_SIZE ) {
return DRM_ERR( ENOMEM );
} else if ( cmd->size > VIA_PREALLOCATED_PCI_SIZE ) {
if (NULL == (hugebuf = (char *) kmalloc( cmd-> size, GFP_KERNEL )))
return DRM_ERR( ENOMEM );
if (DRM_COPY_FROM_USER( hugebuf, cmd->buf, cmd->size ))
if (cmd->size > VIA_MAX_PCI_SIZE) {
return DRM_ERR(ENOMEM);
} else if (cmd->size > VIA_PREALLOCATED_PCI_SIZE) {
if (NULL == (hugebuf = (char *)kmalloc(cmd->size, GFP_KERNEL)))
return DRM_ERR(ENOMEM);
if (DRM_COPY_FROM_USER(hugebuf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
ret = via_parse_pci_cmdbuffer( dev, hugebuf, cmd->size );
kfree( hugebuf );
ret = via_parse_pci_cmdbuffer(dev, hugebuf, cmd->size);
kfree(hugebuf);
} else {
if (DRM_COPY_FROM_USER( dev_priv->pci_buf, cmd->buf, cmd->size ))
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
ret = via_parse_pci_cmdbuffer( dev, dev_priv->pci_buf, cmd->size );
ret =
via_parse_pci_cmdbuffer(dev, dev_priv->pci_buf, cmd->size);
}
return ret;
}
int via_pci_cmdbuffer( DRM_IOCTL_ARGS )
int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_cmdbuffer_t cmdbuf;
int ret;
DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_via_cmdbuffer_t *)data,
sizeof(cmdbuf) );
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
sizeof(cmdbuf));
DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);
DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
cmdbuf.size);
if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("via_pci_cmdbuffer called without lock held\n");
return DRM_ERR(EINVAL);
}
ret = via_dispatch_pci_cmdbuffer( dev, &cmdbuf );
ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
if (ret) {
return ret;
}
@ -312,9 +307,6 @@ int via_pci_cmdbuffer( DRM_IOCTL_ARGS )
return 0;
}
/************************************************************************/
#include "via_3d_reg.h"
@ -327,11 +319,10 @@ int via_pci_cmdbuffer( DRM_IOCTL_ARGS )
#define VIA_REG_TRANSPACE 0x440
/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
#define SetReg2DAGP(nReg, nData) { \
*((uint32_t *)(vb)) = ((nReg) >> 2) | 0xF0000000; \
@ -342,10 +333,10 @@ int via_pci_cmdbuffer( DRM_IOCTL_ARGS )
static uint32_t via_swap_count = 0;
static inline uint32_t *
via_align_buffer(drm_via_private_t * dev_priv, uint32_t * vb, int qw_count)
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
uint32_t * vb, int qw_count)
{
for ( ; qw_count > 0; --qw_count) {
for (; qw_count > 0; --qw_count) {
*vb++ = (0xcc000000 | (dev_priv->dma_low & 0xffffff));
*vb++ = (0xdd400000 | via_swap_count);
dev_priv->dma_low += 8;
@ -359,29 +350,28 @@ via_align_buffer(drm_via_private_t * dev_priv, uint32_t * vb, int qw_count)
*
* Returns virtual pointer to ring buffer.
*/
static inline uint32_t * via_get_dma(drm_via_private_t * dev_priv)
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
return (uint32_t*)(dev_priv->dma_ptr + dev_priv->dma_low);
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
static int via_wait_idle(drm_via_private_t * dev_priv)
{
int count = 10000000;
while (count-- && (VIA_READ(VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)));
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY))) ;
return count;
}
static inline void
via_dummy_bitblt(drm_via_private_t * dev_priv)
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
uint32_t * vb = via_get_dma(dev_priv);
/* GEDST*/
uint32_t *vb = via_get_dma(dev_priv);
/* GEDST */
SetReg2DAGP(0x0C, (0 | (0 << 16)));
/* GEWD*/
/* GEWD */
SetReg2DAGP(0x10, 0 | (0 << 16));
/* BITBLT*/
/* BITBLT */
SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
@ -393,7 +383,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
uint32_t end_addr, end_addr_lo;
uint32_t qw_pad_count;
uint32_t command;
uint32_t * vb;
uint32_t *vb;
dev_priv->dma_low = 0;
vb = via_get_dma(dev_priv);
@ -405,28 +395,27 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
((end_addr & 0xff000000) >> 16));
((end_addr & 0xff000000) >> 16));
*vb++ = HC_HEADER2 | ((VIA_REG_TRANSET>>2)<<12) |
(VIA_REG_TRANSPACE>>2);
*vb++ = (HC_ParaType_PreCR<<16);
*vb++ = HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
(VIA_REG_TRANSPACE >> 2);
*vb++ = (HC_ParaType_PreCR << 16);
dev_priv->dma_low += 8;
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE>>3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
pause_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count<<3);
pause_addr_lo = ((HC_SubA_HAGPBpL<<24) |
HC_HAGPBpID_PAUSE |
(pause_addr & 0xffffff));
pause_addr_hi = ((HC_SubA_HAGPBpH<<24) | (pause_addr >> 24));
pause_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
pause_addr_lo = ((HC_SubA_HAGPBpL << 24) |
HC_HAGPBpID_PAUSE | (pause_addr & 0xffffff));
pause_addr_hi = ((HC_SubA_HAGPBpH << 24) | (pause_addr >> 24));
vb = via_align_buffer(dev_priv, vb, qw_pad_count-1);
vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
*vb++ = pause_addr_hi;
*vb++ = pause_addr_lo;
dev_priv->dma_low += 8;
dev_priv->last_pause_ptr = vb-1;
dev_priv->last_pause_ptr = vb - 1;
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, command);
@ -445,7 +434,7 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
uint32_t pause_addr, pause_addr_lo, pause_addr_hi;
uint32_t start_addr;
uint32_t end_addr, end_addr_lo;
uint32_t * vb;
uint32_t *vb;
uint32_t qw_pad_count;
uint32_t command;
uint32_t jump_addr, jump_addr_lo, jump_addr_hi;
@ -459,38 +448,37 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
via_cmdbuf_wait(dev_priv, 48);
via_dummy_bitblt(dev_priv);
via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE);
via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
/* At end of buffer, rewind with a JUMP command. */
vb = via_get_dma(dev_priv);
*vb++ = HC_HEADER2 | ((VIA_REG_TRANSET>>2)<<12) |
(VIA_REG_TRANSPACE>>2);
*vb++ = (HC_ParaType_PreCR<<16);
*vb++ = HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
(VIA_REG_TRANSPACE >> 2);
*vb++ = (HC_ParaType_PreCR << 16);
dev_priv->dma_low += 8;
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE>>3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
start_addr = agp_base;
end_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count<<3);
end_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
jump_addr = end_addr;
jump_addr_lo = ((HC_SubA_HAGPBpL<<24) | HC_HAGPBpID_JUMP |
jump_addr_lo = ((HC_SubA_HAGPBpL << 24) | HC_HAGPBpID_JUMP |
(jump_addr & 0xffffff));
jump_addr_hi = ((HC_SubA_HAGPBpH<<24) | (jump_addr >> 24));
jump_addr_hi = ((HC_SubA_HAGPBpH << 24) | (jump_addr >> 24));
end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
((end_addr & 0xff000000) >> 16));
((end_addr & 0xff000000) >> 16));
*vb++ = command;
*vb++ = end_addr_lo;
dev_priv->dma_low += 8;
vb = via_align_buffer(dev_priv, vb, qw_pad_count-1);
vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
/* Now at beginning of buffer, make sure engine will pause here. */
dev_priv->dma_low = 0;
@ -503,19 +491,19 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
((end_addr & 0xff000000) >> 16));
((end_addr & 0xff000000) >> 16));
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE>>3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
pause_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count<<3);
pause_addr_lo = ((HC_SubA_HAGPBpL<<24) | HC_HAGPBpID_PAUSE |
(pause_addr & 0xffffff));
pause_addr_hi = ((HC_SubA_HAGPBpH<<24) | (pause_addr >> 24));
pause_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
pause_addr_lo = ((HC_SubA_HAGPBpL << 24) | HC_HAGPBpID_PAUSE |
(pause_addr & 0xffffff));
pause_addr_hi = ((HC_SubA_HAGPBpH << 24) | (pause_addr >> 24));
*vb++ = HC_HEADER2 | ((VIA_REG_TRANSET>>2)<<12) |
(VIA_REG_TRANSPACE>>2);
*vb++ = (HC_ParaType_PreCR<<16);
*vb++ = HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
(VIA_REG_TRANSPACE >> 2);
*vb++ = (HC_ParaType_PreCR << 16);
dev_priv->dma_low += 8;
*vb++ = pause_addr_hi;
@ -533,7 +521,7 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
dev_priv->dma_low += 8;
*dev_priv->last_pause_ptr = jump_addr_lo;
dev_priv->last_pause_ptr = vb-1;
dev_priv->last_pause_ptr = vb - 1;
if (VIA_READ(0x41c) & 0x80000000) {
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
@ -552,33 +540,33 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
uint32_t agp_base;
uint32_t pause_addr, pause_addr_lo, pause_addr_hi;
uint32_t * vb;
uint32_t *vb;
uint32_t qw_pad_count;
via_cmdbuf_wait(dev_priv, 0x200);
vb = via_get_dma(dev_priv);
*vb++ = HC_HEADER2 | ((VIA_REG_TRANSET>>2)<<12) |
(VIA_REG_TRANSPACE>>2);
*vb++ = (HC_ParaType_PreCR<<16);
*vb++ = HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
(VIA_REG_TRANSPACE >> 2);
*vb++ = (HC_ParaType_PreCR << 16);
dev_priv->dma_low += 8;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE>>3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
pause_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count<<3);
pause_addr_lo = ((HC_SubA_HAGPBpL<<24) | cmd_type |
(pause_addr & 0xffffff));
pause_addr_hi = ((HC_SubA_HAGPBpH<<24) | (pause_addr >> 24));
pause_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
pause_addr_lo = ((HC_SubA_HAGPBpL << 24) | cmd_type |
(pause_addr & 0xffffff));
pause_addr_hi = ((HC_SubA_HAGPBpH << 24) | (pause_addr >> 24));
vb = via_align_buffer(dev_priv, vb, qw_pad_count-1);
vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
*vb++ = pause_addr_hi;
*vb++ = pause_addr_lo;
dev_priv->dma_low += 8;
*dev_priv->last_pause_ptr = pause_addr_lo;
dev_priv->last_pause_ptr = vb-1;
dev_priv->last_pause_ptr = vb - 1;
if (VIA_READ(0x41c) & 0x80000000) {
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
@ -597,5 +585,3 @@ static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
via_wait_idle(dev_priv);
}
/************************************************************************/
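
The pause and jump paths above repeatedly pad the ring out to the next CMDBUF_ALIGNMENT_SIZE boundary with qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) - ((dma_low & CMDBUF_ALIGNMENT_MASK) >> 3), then drop the pause command into the final quadword before that boundary. The worked example below assumes a 0x100-byte alignment purely for illustration; the real constants live elsewhere in the driver headers.

/* Worked example of the ring-padding arithmetic used by via_cmdbuf_start(),
 * via_cmdbuf_jump() and via_cmdbuf_flush().  CMDBUF_ALIGNMENT_SIZE here is
 * an assumed value, chosen only to make the numbers concrete. */
#include <stdint.h>
#include <stdio.h>

#define CMDBUF_ALIGNMENT_SIZE 0x100u			/* assumed */
#define CMDBUF_ALIGNMENT_MASK (CMDBUF_ALIGNMENT_SIZE - 1)

int main(void)
{
	uint32_t agp_base = 0xd0000000u;	/* sample AGP ring base  */
	uint32_t dma_low = 0x48;		/* bytes already written */
	uint32_t qw_pad_count, pause_addr;

	/* Quadwords needed to reach the next alignment boundary. */
	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
	    ((dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

	/* The pause command occupies the final quadword before the boundary. */
	pause_addr = agp_base + dma_low - 8 + (qw_pad_count << 3);

	printf("qw_pad_count = %u\n", qw_pad_count);
	printf("pause_addr   = 0x%08x\n", pause_addr);
	printf("dma_low ends at 0x%x (boundary-aligned)\n",
	       dma_low + (qw_pad_count << 3));
	return 0;
}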

View File

@ -52,8 +52,8 @@
#define VIA_LOG_MIN_TEX_REGION_SIZE 16
#endif
#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define VIA_UPLOAD_CTX 0x4
#define VIA_UPLOAD_BUFFERS 0x8
#define VIA_UPLOAD_TEX0 0x10
@ -85,7 +85,6 @@
#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
@ -102,121 +101,120 @@
#define VIDEO 0
#define AGP 1
typedef struct {
unsigned int offset;
unsigned int size;
unsigned int offset;
unsigned int size;
} drm_via_agp_t;
typedef struct {
unsigned int offset;
unsigned int size;
unsigned int offset;
unsigned int size;
} drm_via_fb_t;
typedef struct {
unsigned int context;
unsigned int type;
unsigned int size;
unsigned long index;
unsigned long offset;
unsigned int context;
unsigned int type;
unsigned int size;
unsigned long index;
unsigned long offset;
} drm_via_mem_t;
typedef struct _drm_via_init {
enum {
VIA_INIT_MAP = 0x01,
VIA_CLEANUP_MAP = 0x02
} func;
enum {
VIA_INIT_MAP = 0x01,
VIA_CLEANUP_MAP = 0x02
} func;
unsigned long sarea_priv_offset;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long agpAddr;
unsigned long sarea_priv_offset;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long agpAddr;
} drm_via_init_t;
typedef struct _drm_via_futex {
enum {
VIA_FUTEX_WAIT = 0x00,
VIA_FUTEX_WAKE = 0X01
}fut;
unsigned int op;
unsigned int ms;
unsigned int lock;
unsigned int val;
enum {
VIA_FUTEX_WAIT = 0x00,
VIA_FUTEX_WAKE = 0X01
} fut;
unsigned int op;
unsigned int ms;
unsigned int lock;
unsigned int val;
} drm_via_futex_t;
typedef struct _drm_via_dma_init {
enum {
VIA_INIT_DMA = 0x01,
VIA_CLEANUP_DMA = 0x02
} func;
enum {
VIA_INIT_DMA = 0x01,
VIA_CLEANUP_DMA = 0x02
} func;
unsigned long offset;
unsigned long size;
unsigned long reg_pause_addr;
unsigned long offset;
unsigned long size;
unsigned long reg_pause_addr;
} drm_via_dma_init_t;
typedef struct _drm_via_cmdbuffer {
char *buf;
unsigned long size;
char *buf;
unsigned long size;
} drm_via_cmdbuffer_t;
/* Warning: If you change the SAREA structure you must change the Xserver
* structure as well */
typedef struct _drm_via_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char inUse; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char inUse; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_via_tex_region_t;
typedef struct _drm_via_sarea {
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS];
drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
int texAge; /* last time texture was uploaded */
int ctxOwner; /* last context to upload state */
int vertexPrim;
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS];
drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
int texAge; /* last time texture was uploaded */
int ctxOwner; /* last context to upload state */
int vertexPrim;
/*
* Below is for XvMC.
* We want the lock integers alone on, and aligned to, a cache line.
* Therefore this somewhat strange construct.
*/
/*
* Below is for XvMC.
* We want the lock integers alone on, and aligned to, a cache line.
* Therefore this somewhat strange construct.
*/
char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
} drm_via_sarea_t;
typedef struct _drm_via_flush_agp {
unsigned int offset;
unsigned int size;
unsigned int index;
int discard; /* client is finished with the buffer? */
unsigned int offset;
unsigned int size;
unsigned int index;
int discard; /* client is finished with the buffer? */
} drm_via_flush_agp_t;
typedef struct _drm_via_flush_sys {
unsigned int offset;
unsigned int size;
unsigned long index;
int discard; /* client is finished with the buffer? */
unsigned int offset;
unsigned int size;
unsigned long index;
int discard; /* client is finished with the buffer? */
} drm_via_flush_sys_t;
#ifdef __KERNEL__
int via_fb_init( DRM_IOCTL_ARGS );
int via_mem_alloc( DRM_IOCTL_ARGS );
int via_mem_free( DRM_IOCTL_ARGS );
int via_agp_init( DRM_IOCTL_ARGS );
int via_map_init( DRM_IOCTL_ARGS );
int via_decoder_futex( DRM_IOCTL_ARGS );
int via_dma_init( DRM_IOCTL_ARGS );
int via_cmdbuffer( DRM_IOCTL_ARGS );
int via_flush_ioctl( DRM_IOCTL_ARGS );
int via_pci_cmdbuffer( DRM_IOCTL_ARGS );
int via_fb_init(DRM_IOCTL_ARGS);
int via_mem_alloc(DRM_IOCTL_ARGS);
int via_mem_free(DRM_IOCTL_ARGS);
int via_agp_init(DRM_IOCTL_ARGS);
int via_map_init(DRM_IOCTL_ARGS);
int via_decoder_futex(DRM_IOCTL_ARGS);
int via_dma_init(DRM_IOCTL_ARGS);
int via_cmdbuffer(DRM_IOCTL_ARGS);
int via_flush_ioctl(DRM_IOCTL_ARGS);
int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
#endif
#endif /* _VIA_DRM_H_ */
#endif /* _VIA_DRM_H_ */
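
This header is shared with user space: a client fills a drm_via_cmdbuffer_t and submits it with DRM_IOCTL_VIA_PCICMD, and via_pci_cmdbuffer() then validates the buffer as pairs of 32-bit words (register index, value). The sketch below is hedged: the device node, include paths and register indices are placeholders, and a real client must hold the DRM hardware lock before issuing the ioctl.

/* Hedged user-space sketch: submit a tiny PCI command buffer to the via
 * DRM.  The register indices are placeholders; real clients take them
 * from the 2D/3D register headers and hold the DRM lock first. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "drm.h"		/* ioctl encoding macros (path may vary) */
#include "via_drm.h"		/* drm_via_cmdbuffer_t, DRM_IOCTL_VIA_PCICMD */

int main(void)
{
	uint32_t regs[4] = {
		/* pairs of (register index, value), checked by
		 * via_parse_pci_cmdbuffer() against the allowed ranges */
		0x0000, 0x00000000,	/* placeholder register write */
		0x0001, 0x00000000,	/* placeholder register write */
	};
	drm_via_cmdbuffer_t cmdbuf;
	int fd, ret;

	fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	cmdbuf.buf = (char *) regs;
	cmdbuf.size = sizeof(regs);	/* bytes; kernel walks size >> 3 pairs */

	ret = ioctl(fd, DRM_IOCTL_VIA_PCICMD, &cmdbuf);
	if (ret)
		perror("DRM_VIA_PCICMD");

	close(fd);
	return ret ? 1 : 0;
}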

View File

@ -39,30 +39,28 @@
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
static int postinit(struct drm_device *dev, unsigned long flags)
{
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->minor,
pci_pretty_name(dev->pdev)
);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
@ -71,20 +69,22 @@ static struct pci_device_id pciidlist[] = {
};
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = { via_mem_alloc, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = { via_mem_free, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = { via_agp_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = { via_fb_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = { via_map_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = { via_decoder_futex, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = { via_dma_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = { via_cmdbuffer, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FLUSH)] = { via_flush_ioctl, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_PCICMD)] = { via_pci_cmdbuffer, 1, 0}
[DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0}
};
static struct drm_driver_fn driver_fn = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.context_ctor = via_init_context,
.context_dtor = via_final_context,
.vblank_wait = via_driver_vblank_wait,
@ -100,13 +100,13 @@ static struct drm_driver_fn driver_fn = {
.ioctls = ioctls,
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.fasync = drm_fasync,
},
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -115,10 +115,10 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
static struct pci_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
};
static int __init via_init(void)
@ -134,6 +134,6 @@ static void __exit via_exit(void)
module_init(via_init);
module_exit(via_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");

View File

@ -26,30 +26,28 @@
#include "via_drm.h"
typedef struct drm_via_ring_buffer {
drm_map_t map;
char * virtual_start;
drm_map_t map;
char *virtual_start;
} drm_via_ring_buffer_t;
typedef struct drm_via_private {
drm_via_sarea_t *sarea_priv;
drm_map_t *sarea;
drm_map_t *fb;
drm_map_t *mmio;
unsigned long agpAddr;
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
char * dma_ptr;
unsigned int dma_low;
unsigned int dma_high;
unsigned int dma_offset;
uint32_t * last_pause_ptr;
volatile uint32_t * hw_addr_ptr;
drm_via_ring_buffer_t ring;
drm_via_sarea_t *sarea_priv;
drm_map_t *sarea;
drm_map_t *fb;
drm_map_t *mmio;
unsigned long agpAddr;
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
char *dma_ptr;
unsigned int dma_low;
unsigned int dma_high;
unsigned int dma_offset;
uint32_t *last_pause_ptr;
volatile uint32_t *hw_addr_ptr;
drm_via_ring_buffer_t ring;
char pci_buf[VIA_PREALLOCATED_PCI_SIZE];
} drm_via_private_t;
/* VIA MMIO register access */
#define VIA_BASE ((dev_priv->mmio))
@ -58,22 +56,22 @@ typedef struct drm_via_private {
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
extern int via_init_context(drm_device_t *dev, int context);
extern int via_final_context(drm_device_t *dev, int context);
extern int via_init_context(drm_device_t * dev, int context);
extern int via_final_context(drm_device_t * dev, int context);
extern int via_do_init_map(drm_device_t *dev, drm_via_init_t *init);
extern int via_do_cleanup_map(drm_device_t *dev);
extern int via_do_init_map(drm_device_t * dev, drm_via_init_t * init);
extern int via_do_cleanup_map(drm_device_t * dev);
extern int via_map_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int via_driver_vblank_wait(drm_device_t* dev, unsigned int* sequence);
extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern irqreturn_t via_driver_irq_handler( DRM_IRQ_ARGS );
extern void via_driver_irq_preinstall( drm_device_t *dev );
extern void via_driver_irq_postinstall( drm_device_t *dev );
extern void via_driver_irq_uninstall( drm_device_t *dev );
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
extern void via_driver_irq_preinstall(drm_device_t * dev);
extern void via_driver_irq_postinstall(drm_device_t * dev);
extern void via_driver_irq_uninstall(drm_device_t * dev);
extern int via_dma_cleanup(drm_device_t *dev);
extern int via_dma_cleanup(drm_device_t * dev);
extern int via_dma_cleanup(drm_device_t *dev);
extern int via_dma_cleanup(drm_device_t * dev);
#endif

View File

@ -28,101 +28,100 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <asm/io.h>
#include <linux/pci.h>
#include <asm/io.h>
#include "via_ds.h"
extern unsigned int VIA_DEBUG;
set_t *via_setInit(void)
{
int i;
set_t *set;
set = (set_t *)drm_alloc(sizeof(set_t), DRM_MEM_DRIVER);
for (i = 0; i < SET_SIZE; i++) {
set->list[i].free_next = i+1;
set->list[i].alloc_next = -1;
}
set->list[SET_SIZE-1].free_next = -1;
set->free = 0;
set->alloc = -1;
set->trace = -1;
return set;
int i;
set_t *set;
set = (set_t *) drm_alloc(sizeof(set_t), DRM_MEM_DRIVER);
for (i = 0; i < SET_SIZE; i++) {
set->list[i].free_next = i + 1;
set->list[i].alloc_next = -1;
}
set->list[SET_SIZE - 1].free_next = -1;
set->free = 0;
set->alloc = -1;
set->trace = -1;
return set;
}
int via_setAdd(set_t *set, ITEM_TYPE item)
int via_setAdd(set_t * set, ITEM_TYPE item)
{
int free = set->free;
if (free != -1) {
set->list[free].val = item;
set->free = set->list[free].free_next;
}
else {
return 0;
}
set->list[free].alloc_next = set->alloc;
set->alloc = free;
set->list[free].free_next = -1;
return 1;
int free = set->free;
if (free != -1) {
set->list[free].val = item;
set->free = set->list[free].free_next;
} else {
return 0;
}
set->list[free].alloc_next = set->alloc;
set->alloc = free;
set->list[free].free_next = -1;
return 1;
}
int via_setDel(set_t *set, ITEM_TYPE item)
int via_setDel(set_t * set, ITEM_TYPE item)
{
int alloc = set->alloc;
int prev = -1;
int alloc = set->alloc;
int prev = -1;
while (alloc != -1) {
if (set->list[alloc].val == item) {
if (prev != -1)
set->list[prev].alloc_next = set->list[alloc].alloc_next;
else
set->alloc = set->list[alloc].alloc_next;
break;
}
prev = alloc;
alloc = set->list[alloc].alloc_next;
}
while (alloc != -1) {
if (set->list[alloc].val == item) {
if (prev != -1)
set->list[prev].alloc_next =
set->list[alloc].alloc_next;
else
set->alloc = set->list[alloc].alloc_next;
break;
}
prev = alloc;
alloc = set->list[alloc].alloc_next;
}
if (alloc == -1)
return 0;
if (alloc == -1)
return 0;
set->list[alloc].free_next = set->free;
set->free = alloc;
set->list[alloc].alloc_next = -1;
set->list[alloc].free_next = set->free;
set->free = alloc;
set->list[alloc].alloc_next = -1;
return 1;
return 1;
}
/* setFirst -> setAdd -> setNext is wrong */
int via_setFirst(set_t *set, ITEM_TYPE *item)
int via_setFirst(set_t * set, ITEM_TYPE * item)
{
if (set->alloc == -1)
return 0;
if (set->alloc == -1)
return 0;
*item = set->list[set->alloc].val;
set->trace = set->list[set->alloc].alloc_next;
*item = set->list[set->alloc].val;
set->trace = set->list[set->alloc].alloc_next;
return 1;
return 1;
}
int via_setNext(set_t *set, ITEM_TYPE *item)
int via_setNext(set_t * set, ITEM_TYPE * item)
{
if (set->trace == -1)
return 0;
if (set->trace == -1)
return 0;
*item = set->list[set->trace].val;
set->trace = set->list[set->trace].alloc_next;
*item = set->list[set->trace].val;
set->trace = set->list[set->trace].alloc_next;
return 1;
return 1;
}
int via_setDestroy(set_t *set)
int via_setDestroy(set_t * set)
{
drm_free(set, sizeof(set_t), DRM_MEM_DRIVER);
drm_free(set, sizeof(set_t), DRM_MEM_DRIVER);
return 1;
return 1;
}
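
The set above is a fixed-size table that threads a free list and an allocation list through the same array; as the comment before via_setFirst() warns, calling via_setAdd() in the middle of a walk is wrong because set->trace follows the allocation chain. A hedged driver-context sketch follows; the helper name is made up and it would only compile inside the driver.

/* Sketch (driver context): record a few items in a set, then walk it.
 * Iteration uses via_setFirst()/via_setNext() only; adding items
 * mid-walk would invalidate set->trace, as noted above. */
static void via_set_selftest(void)
{
	set_t *set = via_setInit();
	ITEM_TYPE item;
	int more, i;

	if (set == NULL)
		return;

	for (i = 1; i <= 3; i++)
		via_setAdd(set, (ITEM_TYPE) i);	/* track three dummy handles */

	via_setDel(set, 2);			/* drop one of them again */

	more = via_setFirst(set, &item);
	while (more) {
		DRM_DEBUG("still tracked: 0x%x\n", item);
		more = via_setNext(set, &item);
	}

	via_setDestroy(set);
}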
#define ISFREE(bptr) ((bptr)->free)
@ -130,261 +129,260 @@ int via_setDestroy(set_t *set)
#define PRINTF(fmt, arg...) do{}while(0)
#define fprintf(fmt, arg...) do{}while(0)
void via_mmDumpMemInfo( memHeap_t *heap )
void via_mmDumpMemInfo(memHeap_t * heap)
{
TMemBlock *p;
TMemBlock *p;
PRINTF ("Memory heap %p:\n", heap);
PRINTF("Memory heap %p:\n", heap);
if (heap == 0)
PRINTF (" heap == 0\n");
else {
p = (TMemBlock *)heap;
if (heap == 0)
PRINTF(" heap == 0\n");
else {
p = (TMemBlock *) heap;
while (p) {
PRINTF (" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
p->free ? '.':'U',
p->reserved ? 'R':'.');
p = p->next;
}
}
while (p) {
PRINTF(" Offset:%08x, Size:%08x, %c%c\n", p->ofs,
p->size, p->free ? '.' : 'U',
p->reserved ? 'R' : '.');
p = p->next;
}
}
PRINTF ("End of memory blocks\n");
PRINTF("End of memory blocks\n");
}
memHeap_t *via_mmInit(int ofs,
int size)
memHeap_t *via_mmInit(int ofs, int size)
{
PMemBlock blocks;
PMemBlock blocks;
if (size <= 0)
return 0;
if (size <= 0)
return 0;
blocks = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
blocks = (TMemBlock *)drm_calloc(1,sizeof(TMemBlock),DRM_MEM_DRIVER);
if (blocks) {
blocks->ofs = ofs;
blocks->size = size;
blocks->free = 1;
return (memHeap_t *)blocks;
} else
return 0;
if (blocks) {
blocks->ofs = ofs;
blocks->size = size;
blocks->free = 1;
return (memHeap_t *) blocks;
} else
return 0;
}
memHeap_t *via_mmAddRange(memHeap_t *heap,
int ofs,
int size)
memHeap_t *via_mmAddRange(memHeap_t * heap, int ofs, int size)
{
PMemBlock blocks;
blocks = (TMemBlock *)drm_calloc(2,sizeof(TMemBlock),DRM_MEM_DRIVER);
PMemBlock blocks;
blocks = (TMemBlock *) drm_calloc(2, sizeof(TMemBlock), DRM_MEM_DRIVER);
if (blocks) {
blocks[0].size = size;
blocks[0].free = 1;
blocks[0].ofs = ofs;
blocks[0].next = &blocks[1];
if (blocks) {
blocks[0].size = size;
blocks[0].free = 1;
blocks[0].ofs = ofs;
blocks[0].next = &blocks[1];
/* Discontinuity - stops JoinBlock from trying to join non-adjacent
* ranges.
*/
blocks[1].size = 0;
blocks[1].free = 0;
blocks[1].ofs = ofs+size;
blocks[1].next = (PMemBlock) heap;
return (memHeap_t *)blocks;
}
else
return heap;
/* Discontinuity - stops JoinBlock from trying to join non-adjacent
* ranges.
*/
blocks[1].size = 0;
blocks[1].free = 0;
blocks[1].ofs = ofs + size;
blocks[1].next = (PMemBlock) heap;
return (memHeap_t *) blocks;
} else
return heap;
}
static TMemBlock* SliceBlock(TMemBlock *p,
static TMemBlock *SliceBlock(TMemBlock * p,
int startofs, int size,
int reserved, int alignment)
{
TMemBlock *newblock;
TMemBlock *newblock;
/* break left */
if (startofs > p->ofs) {
newblock = (TMemBlock*)drm_calloc(1,sizeof(TMemBlock),DRM_MEM_DRIVER);
newblock->ofs = startofs;
newblock->size = p->size - (startofs - p->ofs);
newblock->free = 1;
newblock->next = p->next;
p->size -= newblock->size;
p->next = newblock;
p = newblock;
}
/* break left */
if (startofs > p->ofs) {
newblock =
(TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock->ofs = startofs;
newblock->size = p->size - (startofs - p->ofs);
newblock->free = 1;
newblock->next = p->next;
p->size -= newblock->size;
p->next = newblock;
p = newblock;
}
/* break right */
if (size < p->size) {
newblock = (TMemBlock*)drm_calloc(1,sizeof(TMemBlock),DRM_MEM_DRIVER);
newblock->ofs = startofs + size;
newblock->size = p->size - size;
newblock->free = 1;
newblock->next = p->next;
p->size = size;
p->next = newblock;
}
/* break right */
if (size < p->size) {
newblock =
(TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock->ofs = startofs + size;
newblock->size = p->size - size;
newblock->free = 1;
newblock->next = p->next;
p->size = size;
p->next = newblock;
}
/* p = middle block */
p->align = alignment;
p->free = 0;
p->reserved = reserved;
return p;
/* p = middle block */
p->align = alignment;
p->free = 0;
p->reserved = reserved;
return p;
}
PMemBlock via_mmAllocMem(memHeap_t *heap, int size, int align2, int startSearch)
PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2,
int startSearch)
{
int mask,startofs,endofs;
TMemBlock *p;
int mask, startofs, endofs;
TMemBlock *p;
if (!heap || align2 < 0 || size <= 0)
return NULL;
if (!heap || align2 < 0 || size <= 0)
return NULL;
mask = (1 << align2)-1;
startofs = 0;
p = (TMemBlock *)heap;
mask = (1 << align2) - 1;
startofs = 0;
p = (TMemBlock *) heap;
while (p) {
if (ISFREE(p)) {
startofs = (p->ofs + mask) & ~mask;
while (p) {
if (ISFREE(p)) {
startofs = (p->ofs + mask) & ~mask;
if ( startofs < startSearch )
startofs = startSearch;
if (startofs < startSearch)
startofs = startSearch;
endofs = startofs+size;
endofs = startofs + size;
if (endofs <= (p->ofs+p->size))
break;
}
if (endofs <= (p->ofs + p->size))
break;
}
p = p->next;
}
p = p->next;
}
if (!p)
return NULL;
if (!p)
return NULL;
p = SliceBlock(p,startofs,size,0,mask+1);
p->heap = heap;
p = SliceBlock(p, startofs, size, 0, mask + 1);
p->heap = heap;
return p;
return p;
}
static __inline__ int Join2Blocks(TMemBlock *p)
static __inline__ int Join2Blocks(TMemBlock * p)
{
if (p->free && p->next && p->next->free) {
TMemBlock *q = p->next;
p->size += q->size;
p->next = q->next;
drm_free(q,sizeof(TMemBlock),DRM_MEM_DRIVER);
if (p->free && p->next && p->next->free) {
TMemBlock *q = p->next;
p->size += q->size;
p->next = q->next;
drm_free(q, sizeof(TMemBlock), DRM_MEM_DRIVER);
return 1;
}
return 1;
}
return 0;
return 0;
}
int via_mmFreeMem(PMemBlock b)
{
TMemBlock *p,*prev;
TMemBlock *p, *prev;
if (!b)
return 0;
if (!b)
return 0;
if (!b->heap) {
fprintf(stderr, "no heap\n");
if (!b->heap) {
fprintf(stderr, "no heap\n");
return -1;
}
return -1;
}
p = b->heap;
prev = NULL;
p = b->heap;
prev = NULL;
while (p && p != b) {
prev = p;
p = p->next;
}
while (p && p != b) {
prev = p;
p = p->next;
}
if (!p || p->free || p->reserved) {
if (!p)
fprintf(stderr, "block not found in heap\n");
else if (p->free)
fprintf(stderr, "block already free\n");
else
fprintf(stderr, "block is reserved\n");
if (!p || p->free || p->reserved) {
if (!p)
fprintf(stderr, "block not found in heap\n");
else if (p->free)
fprintf(stderr, "block already free\n");
else
fprintf(stderr, "block is reserved\n");
return -1;
}
return -1;
}
p->free = 1;
Join2Blocks(p);
p->free = 1;
Join2Blocks(p);
if (prev)
Join2Blocks(prev);
if (prev)
Join2Blocks(prev);
return 0;
return 0;
}
int via_mmReserveMem(memHeap_t *heap, int offset,int size)
int via_mmReserveMem(memHeap_t * heap, int offset, int size)
{
int endofs;
TMemBlock *p;
int endofs;
TMemBlock *p;
if (!heap || size <= 0)
return -1;
endofs = offset+size;
p = (TMemBlock *)heap;
if (!heap || size <= 0)
return -1;
endofs = offset + size;
p = (TMemBlock *) heap;
while (p && p->ofs <= offset) {
if (ISFREE(p) && endofs <= (p->ofs+p->size)) {
SliceBlock(p,offset,size,1,1);
return 0;
}
p = p->next;
}
return -1;
while (p && p->ofs <= offset) {
if (ISFREE(p) && endofs <= (p->ofs + p->size)) {
SliceBlock(p, offset, size, 1, 1);
return 0;
}
p = p->next;
}
return -1;
}
int via_mmFreeReserved(memHeap_t *heap, int offset)
int via_mmFreeReserved(memHeap_t * heap, int offset)
{
TMemBlock *p,*prev;
TMemBlock *p, *prev;
if (!heap)
return -1;
if (!heap)
return -1;
p = (TMemBlock *)heap;
prev = NULL;
p = (TMemBlock *) heap;
prev = NULL;
while (p && p->ofs != offset) {
prev = p;
p = p->next;
}
while (p && p->ofs != offset) {
prev = p;
p = p->next;
}
if (!p || !p->reserved)
return -1;
p->free = 1;
p->reserved = 0;
Join2Blocks(p);
if (!p || !p->reserved)
return -1;
p->free = 1;
p->reserved = 0;
Join2Blocks(p);
if (prev)
Join2Blocks(prev);
if (prev)
Join2Blocks(prev);
return 0;
return 0;
}
void via_mmDestroy(memHeap_t *heap)
void via_mmDestroy(memHeap_t * heap)
{
TMemBlock *p,*q;
TMemBlock *p, *q;
if (!heap)
return;
p = (TMemBlock *)heap;
if (!heap)
return;
p = (TMemBlock *) heap;
while (p) {
q = p->next;
drm_free(p,sizeof(TMemBlock),DRM_MEM_DRIVER);
p = q;
}
while (p) {
q = p->next;
drm_free(p, sizeof(TMemBlock), DRM_MEM_DRIVER);
p = q;
}
}
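
Taken together, via_mmInit(), via_mmAllocMem() and via_mmFreeMem() implement a simple first-fit heap over an offset range; align2 is a log2 alignment (mask = (1 << align2) - 1) and the returned block's ofs field is the byte offset inside the managed aperture. A hedged driver-context sketch with made-up sizes follows; align2 = 5 matches what the alloc ioctls below pass.

/* Sketch (driver context): manage a 1 MB aperture starting at offset 0,
 * carve out one 64 KB block aligned to 32 bytes (align2 = 5), then tear
 * everything down.  The helper name and sizes are illustrative only. */
static void via_mm_selftest(void)
{
	memHeap_t *heap = via_mmInit(0, 1024 * 1024);
	PMemBlock block;

	if (heap == NULL)
		return;

	block = via_mmAllocMem(heap, 64 * 1024, 5, 0);
	if (block) {
		DRM_DEBUG("got 64K block at offset %d\n", block->ofs);
		via_mmFreeMem(block);	/* rejoins adjacent free blocks */
	}

	via_mmDestroy(heap);		/* frees every remaining TMemBlock */
}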

View File

@ -33,37 +33,36 @@
typedef unsigned int ITEM_TYPE;
typedef struct {
ITEM_TYPE val;
int alloc_next, free_next;
ITEM_TYPE val;
int alloc_next, free_next;
} list_item_t;
typedef struct {
int alloc;
int free;
int trace;
list_item_t list[SET_SIZE];
int alloc;
int free;
int trace;
list_item_t list[SET_SIZE];
} set_t;
set_t *via_setInit(void);
int via_setAdd(set_t *set, ITEM_TYPE item);
int via_setDel(set_t *set, ITEM_TYPE item);
int via_setFirst(set_t *set, ITEM_TYPE *item);
int via_setNext(set_t *set, ITEM_TYPE *item);
int via_setDestroy(set_t *set);
int via_setAdd(set_t * set, ITEM_TYPE item);
int via_setDel(set_t * set, ITEM_TYPE item);
int via_setFirst(set_t * set, ITEM_TYPE * item);
int via_setNext(set_t * set, ITEM_TYPE * item);
int via_setDestroy(set_t * set);
#endif
#ifndef MM_INC
#define MM_INC
struct mem_block_t {
struct mem_block_t *next;
struct mem_block_t *heap;
int ofs,size;
int align;
int free:1;
int reserved:1;
struct mem_block_t *next;
struct mem_block_t *heap;
int ofs, size;
int align;
int free:1;
int reserved:1;
};
typedef struct mem_block_t TMemBlock;
typedef struct mem_block_t *PMemBlock;
@ -72,13 +71,19 @@ typedef struct mem_block_t *PMemBlock;
typedef struct mem_block_t memHeap_t;
static __inline__ int mmBlockSize(PMemBlock b)
{ return b->size; }
{
return b->size;
}
static __inline__ int mmOffset(PMemBlock b)
{ return b->ofs; }
{
return b->ofs;
}
static __inline__ void mmMarkReserved(PMemBlock b)
{ b->reserved = 1; }
{
b->reserved = 1;
}
/*
* input: total size in bytes
@ -86,22 +91,22 @@ static __inline__ void mmMarkReserved(PMemBlock b)
*/
memHeap_t *via_mmInit(int ofs, int size);
PMemBlock via_mmAllocMem(memHeap_t *heap, int size, int align2, int startSearch);
PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2,
int startSearch);
/*
* Free block starts at offset
* input: pointer to a block
* return: 0 if OK, -1 if error
*/
int via_mmFreeMem(PMemBlock b);
int via_mmFreeMem(PMemBlock b);
/*
* destroy MM
*/
void via_mmDestroy(memHeap_t *mmInit);
void via_mmDestroy(memHeap_t * mmInit);
/* For debugging purpose. */
void via_mmDumpMemInfo(memHeap_t *mmInit);
void via_mmDumpMemInfo(memHeap_t * mmInit);
#endif

View File

@ -45,120 +45,122 @@
#define VIA_IRQ_VBI_ENABLE (1 << 19)
#define VIA_IRQ_VBI_PENDING (1 << 3)
irqreturn_t via_driver_irq_handler( DRM_IRQ_ARGS )
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t* dev = (drm_device_t*)arg;
drm_via_private_t* dev_priv = (drm_via_private_t*)dev->dev_private;
u32 status;
int handled = 0;
drm_device_t *dev = (drm_device_t *) arg;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
int handled = 0;
status = VIA_READ(VIA_REG_INTERRUPT);
DRM_DEBUG("viadrv_irq_handler Status: %x\n",status);
if(status & VIA_IRQ_VBI_PENDING){
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
handled = 1;
}
status = VIA_READ(VIA_REG_INTERRUPT);
DRM_DEBUG("viadrv_irq_handler Status: %x\n", status);
if (status & VIA_IRQ_VBI_PENDING) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
handled = 1;
}
/* Acknowledge interrupts ?? */
VIA_WRITE(VIA_REG_INTERRUPT, status);
/* Acknowledge interrupts ?? */
VIA_WRITE(VIA_REG_INTERRUPT, status);
if (handled)
return IRQ_HANDLED;
return IRQ_HANDLED;
else
return IRQ_NONE;
}
static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t* dev_priv)
static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
{
u32 status;
u32 status;
if(dev_priv){
/* Acknowledge interrupts ?? */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBI_PENDING);
}
if (dev_priv) {
/* Acknowledge interrupts ?? */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBI_PENDING);
}
}
int via_driver_vblank_wait(drm_device_t* dev, unsigned int* sequence)
int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
{
drm_via_private_t* dev_priv = (drm_via_private_t*)dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
DRM_DEBUG("viadrv_vblank_wait\n");
if(!dev_priv){
DRM_ERROR("%s called with no initialization\n", __FUNCTION__ );
return -EINVAL;
}
DRM_DEBUG("viadrv_vblank_wait\n");
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return -EINVAL;
}
viadrv_acknowledge_irqs(dev_priv);
viadrv_acknowledge_irqs(dev_priv);
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3*DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received)) -
*sequence ) <= (1<<23)));
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received)) -
*sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
*sequence = cur_vblank;
return ret;
}
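
The condition in DRM_WAIT_ON above relies on unsigned wrap-around: while the counter has not yet reached *sequence the subtraction wraps to a huge value and the caller keeps sleeping; once the counter reaches, or has recently (within 1 << 23 interrupts, roughly the "day" mentioned in the comment) passed, the requested value, the difference becomes small and the wait ends. A stand-alone illustration:

/* Stand-alone illustration of the wrap-safe vblank comparison used above:
 * the wait condition is true only when the counter has reached, or has
 * recently (within 1 << 23 interrupts) passed, the requested sequence. */
#include <stdio.h>

static int vblank_reached(unsigned int cur_vblank, unsigned int sequence)
{
	return (cur_vblank - sequence) <= (1u << 23);
}

int main(void)
{
	/* counter just below the requested sequence: keep waiting   */
	printf("%d\n", vblank_reached(99, 100));		/* 0 */
	/* counter has passed the requested sequence: wake up        */
	printf("%d\n", vblank_reached(101, 100));		/* 1 */
	/* counter wrapped past 2^32 after the request was made      */
	printf("%d\n", vblank_reached(5, 0xFFFFFFF0u));		/* 1 */
	return 0;
}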
/*
* drm_dma.h hooks
*/
void via_driver_irq_preinstall(drm_device_t* dev){
drm_via_private_t* dev_priv = (drm_via_private_t *)dev->dev_private;
u32 status;
void via_driver_irq_preinstall(drm_device_t * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
if(dev_priv){
DRM_DEBUG("mmio: %p\n", dev_priv->mmio);
status = VIA_READ(VIA_REG_INTERRUPT);
DRM_DEBUG("intreg: %x\n", status & VIA_IRQ_VBI_ENABLE);
DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
if (dev_priv) {
DRM_DEBUG("mmio: %p\n", dev_priv->mmio);
status = VIA_READ(VIA_REG_INTERRUPT);
DRM_DEBUG("intreg: %x\n", status & VIA_IRQ_VBI_ENABLE);
// Clear VSync interrupt regs
VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBI_ENABLE);
// Clear VSync interrupt regs
VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBI_ENABLE);
/* Clear bits if they're already high */
viadrv_acknowledge_irqs(dev_priv);
}
/* Clear bits if they're already high */
viadrv_acknowledge_irqs(dev_priv);
}
}
void via_driver_irq_postinstall(drm_device_t* dev){
drm_via_private_t* dev_priv = (drm_via_private_t *)dev->dev_private;
u32 status;
void via_driver_irq_postinstall(drm_device_t * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("via_driver_irq_postinstall\n");
if(dev_priv){
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| VIA_IRQ_VBI_ENABLE);
/* Some magic, oh for some data sheets ! */
DRM_DEBUG("via_driver_irq_postinstall\n");
if (dev_priv) {
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| VIA_IRQ_VBI_ENABLE);
/* Some magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
}
}
}
void via_driver_irq_uninstall(drm_device_t* dev){
drm_via_private_t* dev_priv = (drm_via_private_t *)dev->dev_private;
u32 status;
void via_driver_irq_uninstall(drm_device_t * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("driver_irq_uninstall)\n");
if(dev_priv){
DRM_DEBUG("driver_irq_uninstall)\n");
if (dev_priv) {
/* Some more magic, oh for some data sheets ! */
/* Some more magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBI_ENABLE);
}
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBI_ENABLE);
}
}

View File

@ -24,7 +24,7 @@
#include "drmP.h"
#include "via_drv.h"
int via_do_init_map(drm_device_t *dev, drm_via_init_t *init)
int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv;
unsigned int i;
@ -61,21 +61,20 @@ int via_do_init_map(drm_device_t *dev, drm_via_init_t *init)
}
dev_priv->sarea_priv =
(drm_via_sarea_t *)((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
(drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
init->sarea_priv_offset);
dev_priv->agpAddr = init->agpAddr;
for (i=0; i<VIA_NR_XVMC_LOCKS; ++i)
DRM_INIT_WAITQUEUE( &(dev_priv->decoder_queue[i]) );
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i)
DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
dev->dev_private = (void *)dev_priv;
return 0;
}
int via_do_cleanup_map(drm_device_t *dev)
int via_do_cleanup_map(drm_device_t * dev)
{
if (dev->dev_private) {
@ -83,61 +82,56 @@ int via_do_cleanup_map(drm_device_t *dev)
via_dma_cleanup(dev);
drm_free(dev_priv, sizeof(drm_via_private_t),
DRM_MEM_DRIVER);
drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
dev->dev_private = NULL;
}
return 0;
}
int via_map_init( DRM_IOCTL_ARGS )
int via_map_init(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
DRM_DEVICE;
drm_via_init_t init;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t *)data, sizeof(init));
DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t *) data, sizeof(init));
switch (init.func) {
case VIA_INIT_MAP:
case VIA_INIT_MAP:
return via_do_init_map(dev, &init);
case VIA_CLEANUP_MAP:
case VIA_CLEANUP_MAP:
return via_do_cleanup_map(dev);
}
return -EINVAL;
}
int via_decoder_futex( DRM_IOCTL_ARGS )
int via_decoder_futex(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_futex_t fx;
volatile int *lock;
drm_via_private_t *dev_priv = (drm_via_private_t*) dev->dev_private;
drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
int ret = 0;
DRM_DEVICE;
drm_via_futex_t fx;
volatile int *lock;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
int ret = 0;
DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t *) data, sizeof(fx));
DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t *) data, sizeof(fx));
if (fx.lock > VIA_NR_XVMC_LOCKS)
return -EFAULT;
if (fx.lock > VIA_NR_XVMC_LOCKS)
return -EFAULT;
lock = XVMCLOCKPTR(sAPriv,fx.lock);
lock = XVMCLOCKPTR(sAPriv, fx.lock);
switch(fx.op) {
case VIA_FUTEX_WAIT:
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock],
(fx.ms / 10)*(DRM_HZ/100),
*lock != fx.val);
return ret;
case VIA_FUTEX_WAKE:
DRM_WAKEUP( &(dev_priv->decoder_queue[fx.lock]) );
return 0;
}
return 0;
switch (fx.op) {
case VIA_FUTEX_WAIT:
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock],
(fx.ms / 10) * (DRM_HZ / 100), *lock != fx.val);
return ret;
case VIA_FUTEX_WAKE:
DRM_WAKEUP(&(dev_priv->decoder_queue[fx.lock]));
return 0;
}
return 0;
}

View File

@ -32,324 +32,317 @@
unsigned int VIA_DEBUG = 1;
typedef struct {
int used;
int context;
set_t *sets[2]; /* 0 for frame buffer, 1 for AGP , 2 for System*/
int used;
int context;
set_t *sets[2]; /* 0 for frame buffer, 1 for AGP , 2 for System */
} via_context_t;
static via_context_t global_ppriv[MAX_CONTEXT];
static int add_alloc_set(int context, int type, unsigned int val)
{
int i, retval = 0;
int i, retval = 0;
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used &&
global_ppriv[i].context == context) {
retval = via_setAdd(global_ppriv[i].sets[type], val);
break;
}
}
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used && global_ppriv[i].context == context) {
retval = via_setAdd(global_ppriv[i].sets[type], val);
break;
}
}
return retval;
return retval;
}
static int del_alloc_set(int context, int type, unsigned int val)
{
int i, retval = 0;
int i, retval = 0;
for (i = 0; i < MAX_CONTEXT; i++)
if (global_ppriv[i].used &&
global_ppriv[i].context == context) {
retval = via_setDel(global_ppriv[i].sets[type], val);
break;
}
for (i = 0; i < MAX_CONTEXT; i++)
if (global_ppriv[i].used && global_ppriv[i].context == context) {
retval = via_setDel(global_ppriv[i].sets[type], val);
break;
}
return retval;
return retval;
}
/* agp memory management */
static memHeap_t *AgpHeap = NULL;
int via_agp_init( DRM_IOCTL_ARGS )
int via_agp_init(DRM_IOCTL_ARGS)
{
drm_via_agp_t agp;
drm_via_agp_t agp;
DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t *)data, sizeof(agp));
DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t *) data, sizeof(agp));
AgpHeap = via_mmInit(agp.offset, agp.size);
AgpHeap = via_mmInit(agp.offset, agp.size);
DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size);
DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size);
return 0;
return 0;
}
/* fb memory management */
static memHeap_t *FBHeap = NULL;
int via_fb_init( DRM_IOCTL_ARGS )
int via_fb_init(DRM_IOCTL_ARGS)
{
drm_via_fb_t fb;
drm_via_fb_t fb;
DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t *) data, sizeof(fb));
DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t *)data, sizeof(fb));
FBHeap = via_mmInit(fb.offset, fb.size);
FBHeap = via_mmInit(fb.offset, fb.size);
DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size);
DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size);
return 0;
return 0;
}
int via_init_context(struct drm_device *dev, int context)
{
int i;
int i;
for (i = 0; i < MAX_CONTEXT ; i++)
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
for (i = 0; i < MAX_CONTEXT; i++)
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
if (i >= MAX_CONTEXT) {
for (i = 0; i < MAX_CONTEXT ; i++) {
if (!global_ppriv[i].used) {
global_ppriv[i].context = context;
global_ppriv[i].used = 1;
global_ppriv[i].sets[0] = via_setInit();
global_ppriv[i].sets[1] = via_setInit();
DRM_DEBUG("init allocation set, socket=%d,"
" context = %d\n", i, context);
break;
}
}
if (i >= MAX_CONTEXT) {
for (i = 0; i < MAX_CONTEXT; i++) {
if (!global_ppriv[i].used) {
global_ppriv[i].context = context;
global_ppriv[i].used = 1;
global_ppriv[i].sets[0] = via_setInit();
global_ppriv[i].sets[1] = via_setInit();
DRM_DEBUG("init allocation set, socket=%d,"
" context = %d\n", i, context);
break;
}
}
if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
(global_ppriv[i].sets[1] == NULL)) {
return 0;
}
}
if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
(global_ppriv[i].sets[1] == NULL)) {
return 0;
}
}
return 1;
return 1;
}
int via_final_context(struct drm_device *dev, int context)
{
int i;
for (i=0; i<MAX_CONTEXT; i++)
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
int i;
for (i = 0; i < MAX_CONTEXT; i++)
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
if (i < MAX_CONTEXT) {
set_t *set;
unsigned int item;
int retval;
if (i < MAX_CONTEXT) {
set_t *set;
unsigned int item;
int retval;
DRM_DEBUG("find socket %d, context = %d\n", i, context);
DRM_DEBUG("find socket %d, context = %d\n", i, context);
/* Video Memory */
set = global_ppriv[i].sets[0];
retval = via_setFirst(set, &item);
while (retval) {
DRM_DEBUG("free video memory 0x%x\n", item);
via_mmFreeMem((PMemBlock)item);
retval = via_setNext(set, &item);
}
via_setDestroy(set);
/* Video Memory */
set = global_ppriv[i].sets[0];
retval = via_setFirst(set, &item);
while (retval) {
DRM_DEBUG("free video memory 0x%x\n", item);
via_mmFreeMem((PMemBlock) item);
retval = via_setNext(set, &item);
}
via_setDestroy(set);
/* AGP Memory */
set = global_ppriv[i].sets[1];
retval = via_setFirst(set, &item);
while (retval) {
DRM_DEBUG("free agp memory 0x%x\n", item);
via_mmFreeMem((PMemBlock)item);
retval = via_setNext(set, &item);
}
via_setDestroy(set);
global_ppriv[i].used = 0;
}
/* AGP Memory */
set = global_ppriv[i].sets[1];
retval = via_setFirst(set, &item);
while (retval) {
DRM_DEBUG("free agp memory 0x%x\n", item);
via_mmFreeMem((PMemBlock) item);
retval = via_setNext(set, &item);
}
via_setDestroy(set);
global_ppriv[i].used = 0;
}
#if defined(__linux__)
/* Linux specific until context tracking code gets ported to BSD */
/* Linux specific until context tracking code gets ported to BSD */
/* Last context, perform cleanup */
if (dev->ctx_count == 1 && dev->dev_private) {
if (dev->irq) drm_irq_uninstall(dev);
if (dev->irq)
drm_irq_uninstall(dev);
via_do_cleanup_map(dev);
}
#endif
return 1;
}
int via_mem_alloc( DRM_IOCTL_ARGS)
{
drm_via_mem_t mem;
DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t *)data, sizeof(mem));
switch (mem.type) {
case VIDEO :
if (via_fb_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t *)data, mem,
sizeof(mem));
return 0;
case AGP :
if (via_agp_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t *)data, mem,
sizeof(mem));
return 0;
}
return -EFAULT;
return 1;
}
int via_fb_alloc(drm_via_mem_t* mem)
int via_mem_alloc(DRM_IOCTL_ARGS)
{
drm_via_mm_t fb;
PMemBlock block;
int retval = 0;
drm_via_mem_t mem;
if (!FBHeap)
return -1;
fb.size = mem->size;
fb.context = mem->context;
block = via_mmAllocMem(FBHeap, fb.size, 5, 0);
if (block) {
fb.offset = block->ofs;
fb.free = (unsigned int)block;
if (!add_alloc_set(fb.context, VIDEO, fb.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock)fb.free);
retval = -1;
}
}
else {
fb.offset = 0;
fb.size = 0;
fb.free = 0;
retval = -1;
}
mem->offset = fb.offset;
mem->index = fb.free;
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size,
(int)fb.offset);
return retval;
}
int via_agp_alloc(drm_via_mem_t* mem)
{
drm_via_mm_t agp;
PMemBlock block;
int retval = 0;
if (!AgpHeap)
return -1;
agp.size = mem->size;
agp.context = mem->context;
block = via_mmAllocMem(AgpHeap, agp.size, 5, 0);
if (block) {
agp.offset = block->ofs;
agp.free = (unsigned int)block;
if (!add_alloc_set(agp.context, AGP, agp.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock)agp.free);
retval = -1;
}
}
else {
agp.offset = 0;
agp.size = 0;
agp.free = 0;
}
mem->offset = agp.offset;
mem->index = agp.free;
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size,
(unsigned int)agp.offset);
return retval;
}
int via_mem_free( DRM_IOCTL_ARGS )
{
drm_via_mem_t mem;
DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t *)data, sizeof(mem));
switch (mem.type) {
case VIDEO :
if (via_fb_free(&mem) == 0)
return 0;
break;
case AGP :
if (via_agp_free(&mem) == 0)
return 0;
break;
}
return -EFAULT;
}
int via_fb_free(drm_via_mem_t* mem)
{
drm_via_mm_t fb;
int retval = 0;
if (!FBHeap) {
return -1;
}
fb.free = mem->index;
fb.context = mem->context;
if (!fb.free)
{
return -1;
}
via_mmFreeMem((PMemBlock)fb.free);
if (!del_alloc_set(fb.context, VIDEO, fb.free))
{
retval = -1;
}
DRM_DEBUG("free fb, free = %d\n", fb.free);
return retval;
}
int via_agp_free(drm_via_mem_t* mem)
{
drm_via_mm_t agp;
int retval = 0;
agp.free = mem->index;
agp.context = mem->context;
if (!agp.free)
return -1;
via_mmFreeMem((PMemBlock)agp.free);
if (!del_alloc_set(agp.context, AGP, agp.free)) {
retval = -1;
DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t *) data, sizeof(mem));
switch (mem.type) {
case VIDEO:
if (via_fb_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t *) data, mem,
sizeof(mem));
return 0;
case AGP:
if (via_agp_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t *) data, mem,
sizeof(mem));
return 0;
}
DRM_DEBUG("free agp, free = %d\n", agp.free);
return retval;
return -EFAULT;
}
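After Lindent the allocation handler reads the same: copy the request in, dispatch on mem.type, and copy the resulting offset and index back out. A matching userspace call might look like the sketch below; the DRM_IOCTL_VIA_ALLOCMEM request name and the VIDEO/AGP type values are taken from via_drm.h as assumptions.

/* Hedged sketch: allocating off-screen or AGP memory through the
 * handler above. Assumed names: drm_via_mem_t with the
 * context/type/size/offset/index fields used by the handler,
 * VIDEO/AGP, DRM_IOCTL_VIA_ALLOCMEM from via_drm.h.
 */
#include <sys/ioctl.h>
#include <string.h>
#include "via_drm.h"

/* Returns 0 and fills *offset/*index on success, -1 on failure. */
static int via_alloc(int drm_fd, int context, int type, unsigned int size,
		     unsigned long *offset, unsigned int *index)
{
	drm_via_mem_t mem;

	memset(&mem, 0, sizeof(mem));
	mem.context = context;	/* DRM context that owns the block */
	mem.type = type;	/* VIDEO or AGP */
	mem.size = size;
	if (ioctl(drm_fd, DRM_IOCTL_VIA_ALLOCMEM, &mem) != 0)
		return -1;
	*offset = mem.offset;	/* offset inside the chosen heap */
	*index = mem.index;	/* opaque handle, required for the free */
	return 0;
}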
int via_fb_alloc(drm_via_mem_t * mem)
{
drm_via_mm_t fb;
PMemBlock block;
int retval = 0;
if (!FBHeap)
return -1;
fb.size = mem->size;
fb.context = mem->context;
block = via_mmAllocMem(FBHeap, fb.size, 5, 0);
if (block) {
fb.offset = block->ofs;
fb.free = (unsigned int)block;
if (!add_alloc_set(fb.context, VIDEO, fb.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock) fb.free);
retval = -1;
}
} else {
fb.offset = 0;
fb.size = 0;
fb.free = 0;
retval = -1;
}
mem->offset = fb.offset;
mem->index = fb.free;
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size,
(int)fb.offset);
return retval;
}
int via_agp_alloc(drm_via_mem_t * mem)
{
drm_via_mm_t agp;
PMemBlock block;
int retval = 0;
if (!AgpHeap)
return -1;
agp.size = mem->size;
agp.context = mem->context;
block = via_mmAllocMem(AgpHeap, agp.size, 5, 0);
if (block) {
agp.offset = block->ofs;
agp.free = (unsigned int)block;
if (!add_alloc_set(agp.context, AGP, agp.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock) agp.free);
retval = -1;
}
} else {
agp.offset = 0;
agp.size = 0;
agp.free = 0;
}
mem->offset = agp.offset;
mem->index = agp.free;
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size,
(unsigned int)agp.offset);
return retval;
}
int via_mem_free(DRM_IOCTL_ARGS)
{
drm_via_mem_t mem;
DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t *) data, sizeof(mem));
switch (mem.type) {
case VIDEO:
if (via_fb_free(&mem) == 0)
return 0;
break;
case AGP:
if (via_agp_free(&mem) == 0)
return 0;
break;
}
return -EFAULT;
}
int via_fb_free(drm_via_mem_t * mem)
{
drm_via_mm_t fb;
int retval = 0;
if (!FBHeap) {
return -1;
}
fb.free = mem->index;
fb.context = mem->context;
if (!fb.free) {
return -1;
}
via_mmFreeMem((PMemBlock) fb.free);
if (!del_alloc_set(fb.context, VIDEO, fb.free)) {
retval = -1;
}
DRM_DEBUG("free fb, free = %d\n", fb.free);
return retval;
}
int via_agp_free(drm_via_mem_t * mem)
{
drm_via_mm_t agp;
int retval = 0;
agp.free = mem->index;
agp.context = mem->context;
if (!agp.free)
return -1;
via_mmFreeMem((PMemBlock) agp.free);
if (!del_alloc_set(agp.context, AGP, agp.free)) {
retval = -1;
}
DRM_DEBUG("free agp, free = %d\n", agp.free);
return retval;
}
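The free path trusts mem.index to be the block handle that via_mem_alloc stored there, so the caller has to hand back exactly what the allocation returned. A sketch of that pairing, under the same via_drm.h assumptions as the allocation example:

/* Hedged sketch: releasing a block allocated via the sketch above.
 * Assumed names: drm_via_mem_t, DRM_IOCTL_VIA_FREEMEM from via_drm.h.
 */
#include <sys/ioctl.h>
#include <string.h>
#include "via_drm.h"

static int via_free(int drm_fd, int context, int type, unsigned int index)
{
	drm_via_mem_t mem;

	memset(&mem, 0, sizeof(mem));
	mem.context = context;
	mem.type = type;	/* VIDEO or AGP, must match the allocation */
	mem.index = index;	/* handle returned in mem.index by the alloc */
	return ioctl(drm_fd, DRM_IOCTL_VIA_FREEMEM, &mem);
}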


@ -25,21 +25,21 @@
#define _via_drm_mm_h_
typedef struct {
unsigned int context;
unsigned int size;
unsigned long offset;
unsigned int free;
unsigned int context;
unsigned int size;
unsigned long offset;
unsigned int free;
} drm_via_mm_t;
typedef struct {
unsigned int size;
unsigned long handle;
void *virtual;
unsigned int size;
unsigned long handle;
void *virtual;
} drm_via_dma_t;
int via_fb_alloc(drm_via_mem_t *mem);
int via_fb_free(drm_via_mem_t *mem);
int via_agp_alloc(drm_via_mem_t *mem);
int via_agp_free(drm_via_mem_t *mem);
int via_fb_alloc(drm_via_mem_t * mem);
int via_fb_free(drm_via_mem_t * mem);
int via_agp_alloc(drm_via_mem_t * mem);
int via_agp_free(drm_via_mem_t * mem);
#endif