Lindent of core build. Drivers checked for no binary diffs. A few files
weren't Lindented because their comments didn't convert very well. A bunch of other minor cleanup with no code impact included.
parent 368493edc9
commit 9f9a8f1382
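("Checked for no binary diffs" means a rebuild after the reindent produces byte-identical driver objects. A minimal sketch of such a check, written here only as an illustration — the paths, build flow and hash choice are assumptions, not how this commit was actually verified:

# Sketch only (not part of this commit): hash every object file from a
# build, rebuild after applying the reindent, and compare the manifests.
import hashlib
import pathlib

def object_hashes(build_dir):
    """Map each .o file under build_dir to the MD5 of its contents."""
    root = pathlib.Path(build_dir)
    return {str(p.relative_to(root)): hashlib.md5(p.read_bytes()).hexdigest()
            for p in root.rglob("*.o")}

def binary_diffs(before, after):
    """Return the names of object files whose contents changed."""
    return sorted(n for n in set(before) | set(after)
                  if before.get(n) != after.get(n))

# Usage: build, snapshot, apply the patch, rebuild, snapshot again:
#   before = object_hashes("build/")        # path is illustrative
#   ... apply the Lindent patch and rebuild ...
#   after = object_hashes("build/")
#   assert not binary_diffs(before, after), binary_diffs(before, after)
)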
@@ -92,8 +92,7 @@ static void drm_ati_free_pcigart_table( unsigned long address )
}
int drm_ati_pcigart_init(drm_device_t * dev,
unsigned long *addr,
dma_addr_t *bus_addr)
unsigned long *addr, dma_addr_t * bus_addr)
{
drm_sg_mem_t *entry = dev->sg;
unsigned long address = 0;
@@ -137,9 +136,9 @@ int drm_ati_pcigart_init( drm_device_t *dev,
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_single(dev->pdev,
page_address( entry->pagelist[i] ),
PAGE_SIZE,
PCI_DMA_TODEVICE);
page_address(entry->
pagelist[i]),
PAGE_SIZE, PCI_DMA_TODEVICE);
if (entry->busaddr[i] == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, address, bus_address);
@@ -170,8 +169,7 @@ done:
}
int drm_ati_pcigart_cleanup(drm_device_t * dev,
unsigned long addr,
dma_addr_t bus_addr)
unsigned long addr, dma_addr_t bus_addr)
{
drm_sg_mem_t *entry = dev->sg;
unsigned long pages;
@@ -192,7 +190,8 @@ int drm_ati_pcigart_cleanup( drm_device_t *dev,
? entry->pages : ATI_MAX_PCIGART_PAGES;
for (i = 0; i < pages; i++) {
if ( !entry->busaddr[i] ) break;
if (!entry->busaddr[i])
break;
pci_unmap_single(dev->pdev, entry->busaddr[i],
PAGE_SIZE, PCI_DMA_TODEVICE);
}
@@ -34,7 +34,6 @@
#ifndef _DRM_P_H_
#define _DRM_P_H_
#ifdef __KERNEL__
#ifdef __alpha__
/* add include of current.h so that "current" is defined
@@ -153,10 +152,8 @@
/*@}*/
#include "drm_compat.h"
/***********************************************************************/
/** \name Macros to make printk easier */
/*@{*/
@@ -212,7 +209,6 @@
/*@}*/
/***********************************************************************/
/** \name Internal types and structures */
/*@{*/
@@ -335,7 +331,6 @@ typedef struct drm_buf {
void *dev_private; /**< Per-buffer private storage */
} drm_buf_t;
/** bufs is one longer than it has to be */
typedef struct drm_waitlist {
int count; /**< Number of possible buffers */
@@ -508,7 +503,6 @@ typedef struct drm_ctx_list {
drm_file_t *tag; /**< associated fd private data */
} drm_ctx_list_t;
typedef struct drm_vbl_sig {
struct list_head head;
unsigned int sequence;
@@ -516,7 +510,6 @@ typedef struct drm_vbl_sig {
struct task_struct *task;
} drm_vbl_sig_t;
/**
* DRM device functions structure
*/
@@ -539,7 +532,8 @@ struct drm_driver_fn {
int (*dma_quiescent) (struct drm_device *);
int (*context_ctor) (struct drm_device * dev, int context);
int (*context_dtor) (struct drm_device * dev, int context);
int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
int (*kernel_context_switch) (struct drm_device * dev, int old,
int new);
int (*kernel_context_switch_unlock) (struct drm_device * dev);
int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
/* these have to be filled in */
@@ -562,7 +556,6 @@ struct drm_driver_fn {
struct file_operations fops;
};
/**
* DRM device structure.
*/
@@ -707,7 +700,8 @@ typedef struct drm_minor {
struct proc_dir_entry *dev_root; /**< proc directory entry */
} drm_minor_t;
static __inline__ int drm_core_check_feature(struct drm_device *dev, int feature)
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
return ((dev->fn_tbl->driver_features & feature) ? 1 : 0);
}
@@ -741,7 +735,8 @@ extern int drm_cpu_valid( void );
/* Driver support (drm_drv.h) */
extern int drm_fb_loaded;
extern int __devinit drm_init(struct pci_driver *driver, struct pci_device_id* pciidlist,
extern int __devinit drm_init(struct pci_driver *driver,
struct pci_device_id *pciidlist,
struct drm_driver_fn *driver_fn);
extern void __exit drm_exit(struct pci_driver *driver);
extern void __exit drm_cleanup_pci(struct pci_dev *pdev);
@@ -763,9 +758,10 @@ extern int drm_release(struct inode *inode, struct file *filp);
extern void drm_vm_open(struct vm_area_struct *vma);
extern void drm_vm_close(struct vm_area_struct *vma);
extern void drm_vm_shm_close(struct vm_area_struct *vma);
extern int drm_mmap_dma(struct file *filp,
struct vm_area_struct *vma);
extern int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"
@@ -773,11 +769,9 @@ extern void drm_mem_init(void);
extern int drm_mem_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
extern void *drm_calloc(size_t nmemb, size_t size, int area);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
int area);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
extern unsigned long drm_alloc_pages(int order, int area);
extern void drm_free_pages(unsigned long address, int order,
int area);
extern void drm_free_pages(unsigned long address, int order, int area);
extern DRM_AGP_MEM *drm_alloc_agp(int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
@@ -835,7 +829,6 @@ extern int drm_adddraw(struct inode *inode, struct file *filp,
extern int drm_rmdraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Authentication IOCTL support (drm_auth.h) */
extern int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
drm_magic_t magic);
@@ -850,14 +843,12 @@ extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lock_take(__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
extern int drm_lock_transfer(drm_device_t * dev,
__volatile__ unsigned int *lock,
unsigned int context);
extern int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock,
unsigned int context);
__volatile__ unsigned int *lock, unsigned int context);
extern int drm_notifier(void *priv);
/* Buffer management support (drm_bufs.h) */
@@ -899,7 +890,6 @@ extern int drm_wait_vblank(struct inode *inode, struct file *filp,
extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq);
extern void drm_vbl_send_signals(drm_device_t * dev);
/* AGP/GART support (drm_agpsupport.h) */
extern const drm_agp_t *drm_agp;
extern drm_agp_head_t *drm_agp_init(void);
@@ -929,7 +919,8 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM *handle);
extern int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver_fn *driver_fn);
extern int drm_put_minor(drm_device_t * dev);
extern int drm_get_secondary_minor(drm_device_t *dev, drm_minor_t **sec_minor);
extern int drm_get_secondary_minor(drm_device_t * dev,
drm_minor_t ** sec_minor);
extern int drm_put_secondary_minor(drm_minor_t * sec_minor);
extern unsigned int cards_limit;
extern drm_minor_t *drm_minors;
@@ -954,11 +945,9 @@ extern int drm_sg_free(struct inode *inode, struct file *filp,
/* ATI PCIGART support (ati_pcigart.h) */
extern int drm_ati_pcigart_init(drm_device_t * dev,
unsigned long *addr,
dma_addr_t *bus_addr);
unsigned long *addr, dma_addr_t * bus_addr);
extern int drm_ati_pcigart_cleanup(drm_device_t * dev,
unsigned long addr,
dma_addr_t bus_addr);
unsigned long addr, dma_addr_t bus_addr);
extern void *drm_pci_alloc(drm_device_t * dev, size_t size,
size_t align, dma_addr_t maxaddr,
@@ -968,37 +957,43 @@ extern void drm_pci_free(drm_device_t *dev, size_t size,
/* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name);
extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner,
char *name);
extern void drm_sysfs_destroy(struct drm_sysfs_class *cs);
extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
dev_t dev, struct device *device, const char *fmt, ...);
dev_t dev,
struct device *device,
const char *fmt, ...);
extern void drm_sysfs_device_remove(dev_t dev);
/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
static __inline__ void drm_core_ioremap(struct drm_map *map,
struct drm_device *dev)
{
map->handle = drm_ioremap(map->offset, map->size, dev);
}
static __inline__ void drm_core_ioremap_nocache(struct drm_map *map, struct drm_device *dev)
static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
struct drm_device *dev)
{
map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
}
static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
static __inline__ void drm_core_ioremapfree(struct drm_map *map,
struct drm_device *dev)
{
if (map->handle && map->size)
drm_ioremapfree(map->handle, map->size, dev);
}
static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned long offset)
{
struct list_head *_list;
list_for_each(_list, &dev->maplist->head) {
drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
if ( _entry->map &&
_entry->map->offset == offset ) {
drm_map_list_t *_entry =
list_entry(_list, drm_map_list_t, head);
if (_entry->map && _entry->map->offset == offset) {
return _entry->map;
}
}
@@ -1028,7 +1023,5 @@ extern void drm_free(void *pt, size_t size, int area);
/*@}*/
extern unsigned long drm_core_get_map_ofs(drm_map_t *map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
#endif /* __KERNEL__ */
#endif
@@ -291,7 +291,8 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
if (copy_from_user
(&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
@@ -328,7 +329,8 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
if (!dev->agp || !dev->agp->acquired || !drm_agp->bind_memory)
return -EINVAL;
if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
if (copy_from_user
(&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
@@ -367,7 +369,8 @@ int drm_agp_free(struct inode *inode, struct file *filp,
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
if (copy_from_user(&request, (drm_agp_buffer_t __user *)arg, sizeof(request)))
if (copy_from_user
(&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
return -EFAULT;
if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
return -EINVAL;
@@ -96,7 +96,8 @@ int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
hash = drm_hash_magic(magic);
entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
if (!entry) return -ENOMEM;
if (!entry)
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
entry->magic = magic;
entry->priv = priv;
@@ -130,7 +131,6 @@ int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
drm_magic_entry_t *pt;
int hash;
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
@@ -185,7 +185,8 @@ int drm_getmagic(struct inode *inode, struct file *filp,
} else {
do {
spin_lock(&lock);
if (!sequence) ++sequence; /* reserve 0 */
if (!sequence)
++sequence; /* reserve 0 */
auth.magic = sequence++;
spin_unlock(&lock);
} while (drm_find_file(dev, auth.magic));
@ -41,7 +41,8 @@
|
|||
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
|
||||
* applicable and if supported by the kernel.
|
||||
*/
|
||||
int drm_initmap( drm_device_t *dev, unsigned int offset, unsigned int size, int type, int flags )
|
||||
int drm_initmap(drm_device_t * dev, unsigned int offset, unsigned int size,
|
||||
int type, int flags)
|
||||
{
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *list;
|
||||
|
@ -64,13 +65,8 @@ int drm_initmap( drm_device_t *dev, unsigned int offset, unsigned int size, int
|
|||
}
|
||||
|
||||
*map = (drm_map_t) {
|
||||
.offset = offset,
|
||||
.size = size,
|
||||
.type = type,
|
||||
.flags = flags,
|
||||
.mtrr = -1,
|
||||
.handle = 0,
|
||||
};
|
||||
.offset = offset,.size = size,.type = type,.flags =
|
||||
flags,.mtrr = -1,.handle = 0,};
|
||||
list->map = map;
|
||||
|
||||
DRM_DEBUG("initmap offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
|
@ -123,7 +119,8 @@ int drm_addmap( struct inode *inode, struct file *filp,
|
|||
drm_map_t __user *argp = (void __user *)arg;
|
||||
drm_map_list_t *list;
|
||||
|
||||
if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
|
||||
if (!(filp->f_mode & 3))
|
||||
return -EACCES; /* Require read/write */
|
||||
|
||||
map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
|
||||
if (!map)
|
||||
|
@ -159,19 +156,30 @@ int drm_addmap( struct inode *inode, struct file *filp,
|
|||
|
||||
/* If permanent maps are implemented, maps must match */
|
||||
if (dev->fn_tbl->permanent_maps) {
|
||||
DRM_DEBUG( "Looking for: offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
DRM_DEBUG
|
||||
("Looking for: offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
map->offset, map->size, map->type);
|
||||
list_for_each(_list, &dev->maplist->head) {
|
||||
drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
|
||||
DRM_DEBUG( "Checking: offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
_entry->map->offset, _entry->map->size, _entry->map->type );
|
||||
if ( _entry->map && map->type == _entry->map->type &&
|
||||
map->offset == _entry->map->offset ) {
|
||||
drm_map_list_t *_entry =
|
||||
list_entry(_list, drm_map_list_t,
|
||||
head);
|
||||
DRM_DEBUG
|
||||
("Checking: offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
_entry->map->offset,
|
||||
_entry->map->size,
|
||||
_entry->map->type);
|
||||
if (_entry->map
|
||||
&& map->type == _entry->map->type
|
||||
&& map->offset ==
|
||||
_entry->map->offset) {
|
||||
_entry->map->size = map->size;
|
||||
drm_free( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
drm_free(map, sizeof(*map),
|
||||
DRM_MEM_MAPS);
|
||||
map = _entry->map;
|
||||
DRM_DEBUG( "Found existing: offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
map->offset, map->size, map->type );
|
||||
DRM_DEBUG
|
||||
("Found existing: offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
map->offset, map->size,
|
||||
map->type);
|
||||
goto found_it;
|
||||
}
|
||||
}
|
||||
|
@ -191,13 +199,14 @@ int drm_addmap( struct inode *inode, struct file *filp,
|
|||
if (drm_core_has_MTRR(dev)) {
|
||||
if (map->type == _DRM_FRAME_BUFFER ||
|
||||
(map->flags & _DRM_WRITE_COMBINING)) {
|
||||
map->mtrr = mtrr_add( map->offset, map->size,
|
||||
map->mtrr =
|
||||
mtrr_add(map->offset, map->size,
|
||||
MTRR_TYPE_WRCOMB, 1);
|
||||
}
|
||||
}
|
||||
if (map->type == _DRM_REGISTERS)
|
||||
map->handle = drm_ioremap( map->offset, map->size,
|
||||
dev );
|
||||
map->handle =
|
||||
drm_ioremap(map->offset, map->size, dev);
|
||||
break;
|
||||
}
|
||||
case _DRM_SHM:
|
||||
|
@ -216,8 +225,7 @@ int drm_addmap( struct inode *inode, struct file *filp,
|
|||
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
return -EBUSY;
|
||||
}
|
||||
dev->sigdata.lock =
|
||||
dev->lock.hw_lock = map->handle; /* Pointer to lock */
|
||||
dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
|
||||
}
|
||||
break;
|
||||
case _DRM_AGP:
|
||||
|
@ -258,14 +266,12 @@ found_it:
|
|||
return -EFAULT;
|
||||
if (map->type != _DRM_SHM) {
|
||||
if (copy_to_user(&argp->handle,
|
||||
&map->offset,
|
||||
sizeof(map->offset) ) )
|
||||
&map->offset, sizeof(map->offset)))
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Remove a map private from list and deallocate resources if the mapping
|
||||
* isn't in use.
|
||||
|
@ -294,8 +300,7 @@ int drm_rmmap(struct inode *inode, struct file *filp,
|
|||
drm_map_t request;
|
||||
int found_maps = 0;
|
||||
|
||||
if (copy_from_user(&request, (drm_map_t __user *)arg,
|
||||
sizeof(request))) {
|
||||
if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
|
@ -306,7 +311,8 @@ int drm_rmmap(struct inode *inode, struct file *filp,
|
|||
|
||||
if (r_list->map &&
|
||||
r_list->map->handle == request.handle &&
|
||||
r_list->map->flags & _DRM_REMOVABLE) break;
|
||||
r_list->map->flags & _DRM_REMOVABLE)
|
||||
break;
|
||||
}
|
||||
|
||||
/* List has wrapped around to the head pointer, or its empty we didn't
|
||||
|
@ -327,7 +333,8 @@ int drm_rmmap(struct inode *inode, struct file *filp,
|
|||
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
|
||||
|
||||
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
|
||||
if (pt->vma->vm_private_data == map) found_maps++;
|
||||
if (pt->vma->vm_private_data == map)
|
||||
found_maps++;
|
||||
}
|
||||
|
||||
if (!found_maps) {
|
||||
|
@ -364,14 +371,12 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
|
|||
for (i = 0; i < entry->seg_count; i++) {
|
||||
if (entry->seglist[i]) {
|
||||
drm_free_pages(entry->seglist[i],
|
||||
entry->page_order,
|
||||
DRM_MEM_DMA);
|
||||
entry->page_order, DRM_MEM_DMA);
|
||||
}
|
||||
}
|
||||
drm_free(entry->seglist,
|
||||
entry->seg_count *
|
||||
sizeof(*entry->seglist),
|
||||
DRM_MEM_SEGS);
|
||||
sizeof(*entry->seglist), DRM_MEM_SEGS);
|
||||
|
||||
entry->seg_count = 0;
|
||||
}
|
||||
|
@ -386,8 +391,7 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
|
|||
}
|
||||
drm_free(entry->buflist,
|
||||
entry->buf_count *
|
||||
sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS);
|
||||
sizeof(*entry->buflist), DRM_MEM_BUFS);
|
||||
|
||||
entry->buf_count = 0;
|
||||
}
|
||||
|
@ -429,10 +433,10 @@ int drm_addbufs_agp( struct inode *inode, struct file *filp,
|
|||
drm_buf_t **temp_buflist;
|
||||
drm_buf_desc_t __user *argp = (void __user *)arg;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request, argp,
|
||||
sizeof(request) ) )
|
||||
if (copy_from_user(&request, argp, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
count = request.count;
|
||||
|
@ -455,8 +459,10 @@ int drm_addbufs_agp( struct inode *inode, struct file *filp,
|
|||
DRM_DEBUG("page_order: %d\n", page_order);
|
||||
DRM_DEBUG("total: %d\n", total);
|
||||
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
|
||||
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
|
||||
return -EINVAL;
|
||||
if (dev->queue_count)
|
||||
return -EBUSY; /* Not while in use */
|
||||
|
||||
spin_lock(&dev->count_lock);
|
||||
if (dev->buf_use) {
|
||||
|
@ -511,8 +517,7 @@ int drm_addbufs_agp( struct inode *inode, struct file *filp,
|
|||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = dev->fn_tbl->dev_priv_size;
|
||||
buf->dev_private = drm_alloc( buf->dev_priv_size,
|
||||
DRM_MEM_BUFS );
|
||||
buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
|
||||
if (!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
|
@ -523,8 +528,7 @@ int drm_addbufs_agp( struct inode *inode, struct file *filp,
|
|||
}
|
||||
memset(buf->dev_private, 0, buf->dev_priv_size);
|
||||
|
||||
DRM_DEBUG( "buffer %d @ %p\n",
|
||||
entry->buf_count, buf->address );
|
||||
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
|
||||
|
||||
offset += alignment;
|
||||
entry->buf_count++;
|
||||
|
@ -536,8 +540,7 @@ int drm_addbufs_agp( struct inode *inode, struct file *filp,
|
|||
temp_buflist = drm_realloc(dma->buflist,
|
||||
dma->buf_count * sizeof(*dma->buflist),
|
||||
(dma->buf_count + entry->buf_count)
|
||||
* sizeof(*dma->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
* sizeof(*dma->buflist), DRM_MEM_BUFS);
|
||||
if (!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
drm_cleanup_buf_error(dev, entry);
|
||||
|
@ -596,9 +599,11 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp,
|
|||
drm_buf_t **temp_buflist;
|
||||
drm_buf_desc_t __user *argp = (void __user *)arg;
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
|
||||
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
|
||||
return -EINVAL;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&request, argp, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
@ -608,11 +613,12 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp,
|
|||
size = 1 << order;
|
||||
|
||||
DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
|
||||
request.count, request.size, size,
|
||||
order, dev->queue_count );
|
||||
request.count, request.size, size, order, dev->queue_count);
|
||||
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
|
||||
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
|
||||
return -EINVAL;
|
||||
if (dev->queue_count)
|
||||
return -EBUSY; /* Not while in use */
|
||||
|
||||
alignment = (request.flags & _DRM_PAGE_ALIGN)
|
||||
? PAGE_ALIGN(size) : size;
|
||||
|
@ -654,8 +660,7 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp,
|
|||
DRM_MEM_SEGS);
|
||||
if (!entry->seglist) {
|
||||
drm_free(entry->buflist,
|
||||
count * sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
count * sizeof(*entry->buflist), DRM_MEM_BUFS);
|
||||
up(&dev->struct_sem);
|
||||
atomic_dec(&dev->buf_alloc);
|
||||
return -ENOMEM;
|
||||
|
@ -666,22 +671,18 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp,
|
|||
* have succeeded
|
||||
*/
|
||||
temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
DRM_MEM_PAGES );
|
||||
* sizeof(*dma->pagelist), DRM_MEM_PAGES);
|
||||
if (!temp_pagelist) {
|
||||
drm_free(entry->buflist,
|
||||
count * sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
count * sizeof(*entry->buflist), DRM_MEM_BUFS);
|
||||
drm_free(entry->seglist,
|
||||
count * sizeof(*entry->seglist),
|
||||
DRM_MEM_SEGS );
|
||||
count * sizeof(*entry->seglist), DRM_MEM_SEGS);
|
||||
up(&dev->struct_sem);
|
||||
atomic_dec(&dev->buf_alloc);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(temp_pagelist,
|
||||
dma->pagelist,
|
||||
dma->page_count * sizeof(*dma->pagelist));
|
||||
dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
|
||||
DRM_DEBUG("pagelist: %d entries\n",
|
||||
dma->page_count + (count << page_order));
|
||||
|
||||
|
@ -699,8 +700,7 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp,
|
|||
drm_cleanup_buf_error(dev, entry);
|
||||
drm_free(temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
DRM_MEM_PAGES );
|
||||
* sizeof(*dma->pagelist), DRM_MEM_PAGES);
|
||||
up(&dev->struct_sem);
|
||||
atomic_dec(&dev->buf_alloc);
|
||||
return -ENOMEM;
|
||||
|
@ -739,7 +739,8 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp,
|
|||
entry->seg_count = count;
|
||||
drm_cleanup_buf_error(dev, entry);
|
||||
drm_free(temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
(dma->page_count +
|
||||
(count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
DRM_MEM_PAGES);
|
||||
up(&dev->struct_sem);
|
||||
|
@ -757,15 +758,13 @@ int drm_addbufs_pci( struct inode *inode, struct file *filp,
|
|||
temp_buflist = drm_realloc(dma->buflist,
|
||||
dma->buf_count * sizeof(*dma->buflist),
|
||||
(dma->buf_count + entry->buf_count)
|
||||
* sizeof(*dma->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
* sizeof(*dma->buflist), DRM_MEM_BUFS);
|
||||
if (!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
drm_cleanup_buf_error(dev, entry);
|
||||
drm_free(temp_pagelist,
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
DRM_MEM_PAGES );
|
||||
* sizeof(*dma->pagelist), DRM_MEM_PAGES);
|
||||
up(&dev->struct_sem);
|
||||
atomic_dec(&dev->buf_alloc);
|
||||
return -ENOMEM;
|
||||
|
@ -826,9 +825,11 @@ int drm_addbufs_sg( struct inode *inode, struct file *filp,
|
|||
int i;
|
||||
drm_buf_t **temp_buflist;
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;
|
||||
if (!drm_core_check_feature(dev, DRIVER_SG))
|
||||
return -EINVAL;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&request, argp, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
@ -853,8 +854,10 @@ int drm_addbufs_sg( struct inode *inode, struct file *filp,
|
|||
DRM_DEBUG("page_order: %d\n", page_order);
|
||||
DRM_DEBUG("total: %d\n", total);
|
||||
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
|
||||
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
|
||||
return -EINVAL;
|
||||
if (dev->queue_count)
|
||||
return -EBUSY; /* Not while in use */
|
||||
|
||||
spin_lock(&dev->count_lock);
|
||||
if (dev->buf_use) {
|
||||
|
@ -922,8 +925,7 @@ int drm_addbufs_sg( struct inode *inode, struct file *filp,
|
|||
|
||||
memset(buf->dev_private, 0, buf->dev_priv_size);
|
||||
|
||||
DRM_DEBUG( "buffer %d @ %p\n",
|
||||
entry->buf_count, buf->address );
|
||||
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
|
||||
|
||||
offset += alignment;
|
||||
entry->buf_count++;
|
||||
|
@ -935,8 +937,7 @@ int drm_addbufs_sg( struct inode *inode, struct file *filp,
|
|||
temp_buflist = drm_realloc(dma->buflist,
|
||||
dma->buf_count * sizeof(*dma->buflist),
|
||||
(dma->buf_count + entry->buf_count)
|
||||
* sizeof(*dma->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
* sizeof(*dma->buflist), DRM_MEM_BUFS);
|
||||
if (!temp_buflist) {
|
||||
/* Free the entry because it isn't valid */
|
||||
drm_cleanup_buf_error(dev, entry);
|
||||
|
@ -1009,7 +1010,6 @@ int drm_addbufs( struct inode *inode, struct file *filp,
|
|||
return drm_addbufs_pci(inode, filp, cmd, arg);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get information about the buffer mappings.
|
||||
*
|
||||
|
@ -1041,7 +1041,8 @@ int drm_infobufs( struct inode *inode, struct file *filp,
|
|||
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
return -EINVAL;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock(&dev->count_lock);
|
||||
if (atomic_read(&dev->buf_alloc)) {
|
||||
|
@ -1055,7 +1056,8 @@ int drm_infobufs( struct inode *inode, struct file *filp,
|
|||
return -EFAULT;
|
||||
|
||||
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
|
||||
if ( dma->bufs[i].buf_count ) ++count;
|
||||
if (dma->bufs[i].buf_count)
|
||||
++count;
|
||||
}
|
||||
|
||||
DRM_DEBUG("count = %d\n", count);
|
||||
|
@ -1063,7 +1065,8 @@ int drm_infobufs( struct inode *inode, struct file *filp,
|
|||
if (request.count >= count) {
|
||||
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
|
||||
if (dma->bufs[i].buf_count) {
|
||||
drm_buf_desc_t __user *to = &request.list[count];
|
||||
drm_buf_desc_t __user *to =
|
||||
&request.list[count];
|
||||
drm_buf_entry_t *from = &dma->bufs[i];
|
||||
drm_freelist_t *list = &dma->bufs[i].freelist;
|
||||
if (copy_to_user(&to->count,
|
||||
|
@ -1125,17 +1128,18 @@ int drm_markbufs( struct inode *inode, struct file *filp,
|
|||
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
return -EINVAL;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&request,
|
||||
(drm_buf_desc_t __user *)arg,
|
||||
sizeof(request) ) )
|
||||
(drm_buf_desc_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG("%d, %d, %d\n",
|
||||
request.size, request.low_mark, request.high_mark);
|
||||
order = get_order(request.size);
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
|
||||
return -EINVAL;
|
||||
entry = &dma->bufs[order];
|
||||
|
||||
if (request.low_mark < 0 || request.low_mark > entry->buf_count)
|
||||
|
@ -1175,18 +1179,16 @@ int drm_freebufs( struct inode *inode, struct file *filp,
|
|||
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
return -EINVAL;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&request,
|
||||
(drm_buf_free_t __user *)arg,
|
||||
sizeof(request) ) )
|
||||
(drm_buf_free_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG("%d\n", request.count);
|
||||
for (i = 0; i < request.count; i++) {
|
||||
if ( copy_from_user( &idx,
|
||||
&request.list[i],
|
||||
sizeof(idx) ) )
|
||||
if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
|
||||
return -EFAULT;
|
||||
if (idx < 0 || idx >= dma->buf_count) {
|
||||
DRM_ERROR("Index %d (of %d max)\n",
|
||||
|
@ -1235,7 +1237,8 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
|
|||
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
return -EINVAL;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock(&dev->count_lock);
|
||||
if (atomic_read(&dev->buf_alloc)) {
|
||||
|
@ -1249,15 +1252,15 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
|
|||
return -EFAULT;
|
||||
|
||||
if (request.count >= dma->buf_count) {
|
||||
if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
|
||||
(drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG))) {
|
||||
if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
|
||||
|| (drm_core_check_feature(dev, DRIVER_SG)
|
||||
&& (dma->flags & _DRM_DMA_USE_SG))) {
|
||||
drm_map_t *map = dev->agp_buffer_map;
|
||||
|
||||
if (!map) {
|
||||
retcode = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
#if LINUX_VERSION_CODE <= 0x020402
|
||||
down(¤t->mm->mmap_sem);
|
||||
#else
|
||||
|
@ -1308,15 +1311,13 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
|
|||
goto done;
|
||||
}
|
||||
if (copy_to_user(&request.list[i].used,
|
||||
&zero,
|
||||
sizeof(zero) ) ) {
|
||||
&zero, sizeof(zero))) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
address = virtual + dma->buflist[i]->offset; /* *** */
|
||||
if (copy_to_user(&request.list[i].address,
|
||||
&address,
|
||||
sizeof(address) ) ) {
|
||||
&address, sizeof(address))) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
|
@ -1331,4 +1332,3 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
|
|||
|
||||
return retcode;
|
||||
}
|
||||
|
||||
|
|
|
@ -130,21 +130,38 @@ struct device;
|
|||
#define pci_dev_put(x) do {} while (0)
|
||||
#define pci_get_subsys pci_find_subsys
|
||||
|
||||
static inline struct class_device *DRM(sysfs_device_add)(struct drm_sysfs_class *cs, dev_t dev, struct device *device, const char *fmt, ...){return NULL;}
|
||||
static inline struct class_device *DRM(sysfs_device_add) (struct drm_sysfs_class
|
||||
* cs, dev_t dev,
|
||||
struct device *
|
||||
device,
|
||||
const char *fmt,
|
||||
...) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void DRM(sysfs_device_remove)(dev_t dev){}
|
||||
static inline void DRM(sysfs_device_remove) (dev_t dev) {
|
||||
}
|
||||
|
||||
static inline void DRM(sysfs_destroy)(struct drm_sysfs_class *cs){}
|
||||
static inline void DRM(sysfs_destroy) (struct drm_sysfs_class * cs) {
|
||||
}
|
||||
|
||||
static inline struct drm_sysfs_class *DRM(sysfs_create)(struct module *owner, char *name) { return NULL; }
|
||||
static inline struct drm_sysfs_class *DRM(sysfs_create) (struct module * owner,
|
||||
char *name) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifndef pci_pretty_name
|
||||
#define pci_pretty_name(x) x->name
|
||||
#endif
|
||||
|
||||
struct drm_device;
|
||||
static inline int radeon_create_i2c_busses(struct drm_device *dev){return 0;};
|
||||
static inline void radeon_delete_i2c_busses(struct drm_device *dev){};
|
||||
static inline int radeon_create_i2c_busses(struct drm_device *dev)
|
||||
{
|
||||
return 0;
|
||||
};
|
||||
static inline void radeon_delete_i2c_busses(struct drm_device *dev)
|
||||
{
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -58,8 +58,10 @@
|
|||
*/
|
||||
void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
|
||||
{
|
||||
if ( ctx_handle < 0 ) goto failed;
|
||||
if ( !dev->ctx_bitmap ) goto failed;
|
||||
if (ctx_handle < 0)
|
||||
goto failed;
|
||||
if (!dev->ctx_bitmap)
|
||||
goto failed;
|
||||
|
||||
if (ctx_handle < DRM_MAX_CTXBITMAP) {
|
||||
down(&dev->struct_sem);
|
||||
|
@ -69,8 +71,7 @@ void drm_ctxbitmap_free( drm_device_t *dev, int ctx_handle )
|
|||
return;
|
||||
}
|
||||
failed:
|
||||
DRM_ERROR( "Attempt to free invalid context handle: %d\n",
|
||||
ctx_handle );
|
||||
DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -88,7 +89,8 @@ int drm_ctxbitmap_next( drm_device_t *dev )
|
|||
{
|
||||
int bit;
|
||||
|
||||
if(!dev->ctx_bitmap) return -1;
|
||||
if (!dev->ctx_bitmap)
|
||||
return -1;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
|
||||
|
@ -101,10 +103,13 @@ int drm_ctxbitmap_next( drm_device_t *dev )
|
|||
drm_map_t **ctx_sareas;
|
||||
|
||||
ctx_sareas = drm_realloc(dev->context_sareas,
|
||||
(dev->max_context - 1) *
|
||||
sizeof(*dev->context_sareas),
|
||||
(dev->max_context -
|
||||
1) *
|
||||
sizeof(*dev->
|
||||
context_sareas),
|
||||
dev->max_context *
|
||||
sizeof(*dev->context_sareas),
|
||||
sizeof(*dev->
|
||||
context_sareas),
|
||||
DRM_MEM_MAPS);
|
||||
if (!ctx_sareas) {
|
||||
clear_bit(bit, dev->ctx_bitmap);
|
||||
|
@ -115,8 +120,8 @@ int drm_ctxbitmap_next( drm_device_t *dev )
|
|||
dev->context_sareas[bit] = NULL;
|
||||
} else {
|
||||
/* max_context == 1 at this point */
|
||||
dev->context_sareas = drm_alloc(
|
||||
dev->max_context *
|
||||
dev->context_sareas =
|
||||
drm_alloc(dev->max_context *
|
||||
sizeof(*dev->context_sareas),
|
||||
DRM_MEM_MAPS);
|
||||
if (!dev->context_sareas) {
|
||||
|
@ -178,10 +183,10 @@ int drm_ctxbitmap_init( drm_device_t *dev )
|
|||
void drm_ctxbitmap_cleanup(drm_device_t * dev)
|
||||
{
|
||||
down(&dev->struct_sem);
|
||||
if( dev->context_sareas ) drm_free( dev->context_sareas,
|
||||
if (dev->context_sareas)
|
||||
drm_free(dev->context_sareas,
|
||||
sizeof(*dev->context_sareas) *
|
||||
dev->max_context,
|
||||
DRM_MEM_MAPS );
|
||||
dev->max_context, DRM_MEM_MAPS);
|
||||
drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
|
||||
up(&dev->struct_sem);
|
||||
}
|
||||
|
@ -217,7 +222,8 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
|
|||
return -EFAULT;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) {
|
||||
if (dev->max_context < 0
|
||||
|| request.ctx_id >= (unsigned)dev->max_context) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -254,15 +260,13 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
|
|||
struct list_head *list;
|
||||
|
||||
if (copy_from_user(&request,
|
||||
(drm_ctx_priv_map_t __user *)arg,
|
||||
sizeof(request)))
|
||||
(drm_ctx_priv_map_t __user *) arg, sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = list_entry(list, drm_map_list_t, head);
|
||||
if(r_list->map &&
|
||||
r_list->map->handle == request.handle)
|
||||
if (r_list->map && r_list->map->handle == request.handle)
|
||||
goto found;
|
||||
}
|
||||
bad:
|
||||
|
@ -271,7 +275,8 @@ bad:
|
|||
|
||||
found:
|
||||
map = r_list->map;
|
||||
if (!map) goto bad;
|
||||
if (!map)
|
||||
goto bad;
|
||||
if (dev->max_context < 0)
|
||||
goto bad;
|
||||
if (request.ctx_id >= (unsigned)dev->max_context)
|
||||
|
@ -304,7 +309,6 @@ int drm_context_switch( drm_device_t *dev, int old, int new )
|
|||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
||||
DRM_DEBUG("Context switch from %d to %d\n", old, new);
|
||||
|
||||
if (new == dev->last_context) {
|
||||
|
@ -368,8 +372,7 @@ int drm_resctx( struct inode *inode, struct file *filp,
|
|||
memset(&ctx, 0, sizeof(ctx));
|
||||
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
|
||||
ctx.handle = i;
|
||||
if ( copy_to_user( &res.contexts[i],
|
||||
&i, sizeof(i) ) )
|
||||
if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
|
@ -415,8 +418,7 @@ int drm_addctx( struct inode *inode, struct file *filp,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT )
|
||||
{
|
||||
if (ctx.handle != DRM_KERNEL_CONTEXT) {
|
||||
if (dev->fn_tbl->context_ctor)
|
||||
dev->fn_tbl->context_ctor(dev, ctx.handle);
|
||||
}
|
||||
|
@ -575,4 +577,3 @@ int drm_rmctx( struct inode *inode, struct file *filp,
|
|||
}
|
||||
|
||||
/*@}*/
|
||||
|
||||
|
|
|
@ -35,4 +35,3 @@
|
|||
#define DRIVER_MAJOR 1
|
||||
#define DRIVER_MINOR 0
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
|
|
|
@ -72,7 +72,8 @@ void drm_dma_takedown(drm_device_t *dev)
|
|||
drm_device_dma_t *dma = dev->dma;
|
||||
int i, j;
|
||||
|
||||
if (!dma) return;
|
||||
if (!dma)
|
||||
return;
|
||||
|
||||
/* Clear dma buffers */
|
||||
for (i = 0; i <= DRM_MAX_ORDER; i++) {
|
||||
|
@ -91,28 +92,26 @@ void drm_dma_takedown(drm_device_t *dev)
|
|||
}
|
||||
drm_free(dma->bufs[i].seglist,
|
||||
dma->bufs[i].seg_count
|
||||
* sizeof(*dma->bufs[0].seglist),
|
||||
DRM_MEM_SEGS);
|
||||
* sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
|
||||
}
|
||||
if (dma->bufs[i].buf_count) {
|
||||
for (j = 0; j < dma->bufs[i].buf_count; j++) {
|
||||
if (dma->bufs[i].buflist[j].dev_private) {
|
||||
drm_free(dma->bufs[i].buflist[j].dev_private,
|
||||
dma->bufs[i].buflist[j].dev_priv_size,
|
||||
DRM_MEM_BUFS);
|
||||
drm_free(dma->bufs[i].buflist[j].
|
||||
dev_private,
|
||||
dma->bufs[i].buflist[j].
|
||||
dev_priv_size, DRM_MEM_BUFS);
|
||||
}
|
||||
}
|
||||
drm_free(dma->bufs[i].buflist,
|
||||
dma->bufs[i].buf_count *
|
||||
sizeof(*dma->bufs[0].buflist),
|
||||
DRM_MEM_BUFS);
|
||||
sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
|
||||
}
|
||||
}
|
||||
|
||||
if (dma->buflist) {
|
||||
drm_free(dma->buflist,
|
||||
dma->buf_count * sizeof(*dma->buflist),
|
||||
DRM_MEM_BUFS);
|
||||
dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
|
||||
}
|
||||
|
||||
if (dma->pagelist) {
|
||||
|
@ -124,7 +123,6 @@ void drm_dma_takedown(drm_device_t *dev)
|
|||
dev->dma = NULL;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Free a buffer.
|
||||
*
|
||||
|
@ -135,14 +133,16 @@ void drm_dma_takedown(drm_device_t *dev)
|
|||
*/
|
||||
void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf)
|
||||
{
|
||||
if (!buf) return;
|
||||
if (!buf)
|
||||
return;
|
||||
|
||||
buf->waiting = 0;
|
||||
buf->pending = 0;
|
||||
buf->filp = NULL;
|
||||
buf->used = 0;
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && waitqueue_active(&buf->dma_wait)) {
|
||||
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
|
||||
&& waitqueue_active(&buf->dma_wait)) {
|
||||
wake_up_interruptible(&buf->dma_wait);
|
||||
}
|
||||
}
|
||||
|
@ -161,7 +161,8 @@ void drm_core_reclaim_buffers( struct file *filp )
|
|||
drm_device_dma_t *dma = dev->dma;
|
||||
int i;
|
||||
|
||||
if (!dma) return;
|
||||
if (!dma)
|
||||
return;
|
||||
for (i = 0; i < dma->buf_count; i++) {
|
||||
if (dma->buflist[i]->filp == filp) {
|
||||
switch (dma->buflist[i]->list) {
|
||||
|
@ -179,4 +180,3 @@ void drm_core_reclaim_buffers( struct file *filp )
|
|||
}
|
||||
}
|
||||
EXPORT_SYMBOL(drm_core_reclaim_buffers);
|
||||
|
||||
|
|
|
@ -120,6 +120,7 @@ drm_ioctl_desc_t drm_ioctls[] = {
|
|||
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, 1, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, 1, 0},
|
||||
/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, 1, 0},
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, 1, 1},
|
||||
|
||||
|
@ -165,7 +166,8 @@ int drm_takedown( drm_device_t *dev )
|
|||
if (dev->fn_tbl->pretakedown)
|
||||
dev->fn_tbl->pretakedown(dev);
|
||||
|
||||
if ( dev->irq_enabled ) drm_irq_uninstall( dev );
|
||||
if (dev->irq_enabled)
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
down(&dev->struct_sem);
|
||||
del_timer(&dev->timer);
|
||||
|
@ -177,8 +179,7 @@ int drm_takedown( drm_device_t *dev )
|
|||
}
|
||||
|
||||
if (dev->unique) {
|
||||
drm_free( dev->unique, strlen( dev->unique ) + 1,
|
||||
DRM_MEM_DRIVER );
|
||||
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
|
||||
dev->unique = NULL;
|
||||
dev->unique_len = 0;
|
||||
}
|
||||
|
@ -200,13 +201,15 @@ int drm_takedown( drm_device_t *dev )
|
|||
intact until drv_cleanup is called. */
|
||||
for (entry = dev->agp->memory; entry; entry = nexte) {
|
||||
nexte = entry->next;
|
||||
if ( entry->bound ) drm_unbind_agp( entry->memory );
|
||||
if (entry->bound)
|
||||
drm_unbind_agp(entry->memory);
|
||||
drm_free_agp(entry->memory, entry->pages);
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
|
||||
}
|
||||
dev->agp->memory = NULL;
|
||||
|
||||
if ( dev->agp->acquired ) drm_agp_do_release();
|
||||
if (dev->agp->acquired)
|
||||
drm_agp_do_release();
|
||||
|
||||
dev->agp->acquired = 0;
|
||||
dev->agp->enabled = 0;
|
||||
|
@ -242,7 +245,8 @@ int drm_takedown( drm_device_t *dev )
|
|||
break;
|
||||
case _DRM_SCATTER_GATHER:
|
||||
/* Handle it */
|
||||
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
|
||||
if (drm_core_check_feature
|
||||
(dev, DRIVER_SG) && dev->sg) {
|
||||
drm_sg_cleanup(dev->sg);
|
||||
dev->sg = NULL;
|
||||
}
|
||||
|
@ -255,7 +259,6 @@ int drm_takedown( drm_device_t *dev )
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
|
||||
for (i = 0; i < dev->queue_count; i++) {
|
||||
|
||||
|
@ -315,7 +318,9 @@ MODULE_PARM( drm_opts, "s" );
|
|||
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
|
||||
* after the initialization for driver customization.
|
||||
*/
|
||||
int __devinit drm_init( struct pci_driver *driver, struct pci_device_id* pciidlist, struct drm_driver_fn *driver_fn)
|
||||
int __devinit drm_init(struct pci_driver *driver,
|
||||
struct pci_device_id *pciidlist,
|
||||
struct drm_driver_fn *driver_fn)
|
||||
{
|
||||
struct pci_dev *pdev;
|
||||
struct pci_device_id *pid;
|
||||
|
@ -334,10 +339,13 @@ int __devinit drm_init( struct pci_driver *driver, struct pci_device_id* pciidli
|
|||
|
||||
pdev = NULL;
|
||||
/* pass back in pdev to account for multiple identical cards */
|
||||
while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) {
|
||||
while ((pdev =
|
||||
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
|
||||
pid->subdevice, pdev))) {
|
||||
/* is there already a driver loaded, or (short circuit saves work) */
|
||||
/* does something like VesaFB have control of the memory region? */
|
||||
if (pci_dev_driver(pdev) || pci_request_regions(pdev, "DRM scan")) {
|
||||
if (pci_dev_driver(pdev)
|
||||
|| pci_request_regions(pdev, "DRM scan")) {
|
||||
/* go into stealth mode */
|
||||
drm_fb_loaded = 1;
|
||||
pci_dev_put(pdev);
|
||||
|
@ -356,7 +364,10 @@ int __devinit drm_init( struct pci_driver *driver, struct pci_device_id* pciidli
|
|||
|
||||
pdev = NULL;
|
||||
/* pass back in pdev to account for multiple identical cards */
|
||||
while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) {
|
||||
while ((pdev =
|
||||
pci_get_subsys(pid->vendor, pid->device,
|
||||
pid->subvendor, pid->subdevice,
|
||||
pdev))) {
|
||||
/* stealth mode requires a manual probe */
|
||||
drm_probe(pdev, &pciidlist[i], driver_fn);
|
||||
}
|
||||
|
@ -395,17 +406,22 @@ static void __exit drm_cleanup( drm_device_t *dev )
|
|||
if ((map = r_list->map)) {
|
||||
switch (map->type) {
|
||||
case _DRM_REGISTERS:
|
||||
drm_ioremapfree( map->handle, map->size, dev );
|
||||
drm_ioremapfree(map->handle, map->size,
|
||||
dev);
|
||||
break;
|
||||
|
||||
case _DRM_FRAME_BUFFER:
|
||||
if (drm_core_has_MTRR(dev)) {
|
||||
if (map->mtrr >= 0) {
|
||||
int retcode;
|
||||
retcode = mtrr_del( map->mtrr,
|
||||
map->offset,
|
||||
retcode =
|
||||
mtrr_del(map->mtrr,
|
||||
map->
|
||||
offset,
|
||||
map->size);
|
||||
DRM_DEBUG( "mtrr_del=%d\n", retcode );
|
||||
DRM_DEBUG
|
||||
("mtrr_del=%d\n",
|
||||
retcode);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -429,7 +445,8 @@ static void __exit drm_cleanup( drm_device_t *dev )
|
|||
|
||||
drm_ctxbitmap_cleanup(dev);
|
||||
|
||||
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp && dev->agp->agp_mtrr >= 0) {
|
||||
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
|
||||
&& dev->agp->agp_mtrr >= 0) {
|
||||
int retval;
|
||||
retval = mtrr_del(dev->agp->agp_mtrr,
|
||||
dev->agp->agp_info.aper_base,
|
||||
|
@ -483,9 +500,9 @@ static int __init drm_core_init(void)
|
|||
{
|
||||
int ret = -ENOMEM;
|
||||
|
||||
cards_limit = (cards_limit < DRM_MAX_MINOR + 1 ? cards_limit : DRM_MAX_MINOR + 1);
|
||||
drm_minors = drm_calloc(cards_limit,
|
||||
sizeof(*drm_minors), DRM_MEM_STUB);
|
||||
cards_limit =
|
||||
(cards_limit < DRM_MAX_MINOR + 1 ? cards_limit : DRM_MAX_MINOR + 1);
|
||||
drm_minors = drm_calloc(cards_limit, sizeof(*drm_minors), DRM_MEM_STUB);
|
||||
if (!drm_minors)
|
||||
goto err_p1;
|
||||
|
||||
|
@ -509,11 +526,7 @@ static int __init drm_core_init(void)
|
|||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE
|
||||
);
|
||||
DRIVER_MAJOR, DRIVER_MINOR, DRIVER_PATCHLEVEL, DRIVER_DATE);
|
||||
return 0;
|
||||
err_p3:
|
||||
drm_sysfs_destroy(drm_class);
|
||||
|
@ -534,14 +547,12 @@ static void __exit drm_core_exit (void)
|
|||
|
||||
unregister_chrdev(DRM_MAJOR, "drm");
|
||||
|
||||
drm_free(drm_minors, sizeof(*drm_minors) *
|
||||
cards_limit, DRM_MEM_STUB);
|
||||
drm_free(drm_minors, sizeof(*drm_minors) * cards_limit, DRM_MEM_STUB);
|
||||
}
|
||||
|
||||
module_init(drm_core_init);
|
||||
module_exit(drm_core_exit);
|
||||
|
||||
|
||||
/**
|
||||
* Get version information
|
||||
*
|
||||
|
@ -606,7 +617,8 @@ int drm_ioctl( struct inode *inode, struct file *filp,
|
|||
|
||||
if (nr < DRIVER_IOCTL_COUNT)
|
||||
ioctl = &drm_ioctls[nr];
|
||||
else if ((nr >= DRM_COMMAND_BASE) || (nr < DRM_COMMAND_BASE + dev->fn_tbl->num_ioctls))
|
||||
else if ((nr >= DRM_COMMAND_BASE)
|
||||
|| (nr < DRM_COMMAND_BASE + dev->fn_tbl->num_ioctls))
|
||||
ioctl = &dev->fn_tbl->ioctls[nr - DRM_COMMAND_BASE];
|
||||
else
|
||||
goto err_i1;
|
||||
|
@ -626,8 +638,8 @@ int drm_ioctl( struct inode *inode, struct file *filp,
|
|||
}
|
||||
err_i1:
|
||||
atomic_dec(&dev->ioctl_count);
|
||||
if (retcode) DRM_DEBUG( "ret = %x\n", retcode);
|
||||
if (retcode)
|
||||
DRM_DEBUG("ret = %x\n", retcode);
|
||||
return retcode;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_ioctl);
|
||||
|
||||
|
|
|
@ -49,8 +49,7 @@ static int drm_setup( drm_device_t *dev )
|
|||
dev->buf_use = 0;
|
||||
atomic_set(&dev->buf_alloc, 0);
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
{
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
|
||||
i = drm_dma_setup(dev);
|
||||
if (i < 0)
|
||||
return i;
|
||||
|
@ -64,9 +63,9 @@ static int drm_setup( drm_device_t *dev )
|
|||
dev->magiclist[i].tail = NULL;
|
||||
}
|
||||
|
||||
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
|
||||
DRM_MEM_CTXLIST);
|
||||
if(dev->ctxlist == NULL) return -ENOMEM;
|
||||
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
|
||||
if (dev->ctxlist == NULL)
|
||||
return -ENOMEM;
|
||||
memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
|
||||
INIT_LIST_HEAD(&dev->ctxlist->head);
|
||||
|
||||
|
@ -112,7 +111,6 @@ static int drm_setup( drm_device_t *dev )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Open file.
|
||||
*
|
||||
|
@ -205,13 +203,16 @@ int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev)
|
|||
drm_file_t *priv;
|
||||
int ret;
|
||||
|
||||
if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */
|
||||
if (!drm_cpu_valid()) return -EINVAL;
|
||||
if (filp->f_flags & O_EXCL)
|
||||
return -EBUSY; /* No exclusive opens */
|
||||
if (!drm_cpu_valid())
|
||||
return -EINVAL;
|
||||
|
||||
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
|
||||
|
||||
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
|
||||
if(!priv) return -ENOMEM;
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(priv, 0, sizeof(*priv));
|
||||
filp->private_data = priv;
|
||||
|
@ -250,10 +251,12 @@ int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t *dev)
|
|||
if (!dev->hose) {
|
||||
struct pci_dev *pci_dev;
|
||||
pci_dev = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
|
||||
if (pci_dev) dev->hose = pci_dev->sysdata;
|
||||
if (pci_dev)
|
||||
dev->hose = pci_dev->sysdata;
|
||||
if (!dev->hose) {
|
||||
struct pci_bus *b = pci_bus_b(pci_root_buses.next);
|
||||
if (b) dev->hose = b->sysdata;
|
||||
if (b)
|
||||
dev->hose = b->sysdata;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -272,9 +275,11 @@ int drm_fasync(int fd, struct file *filp, int on)
|
|||
drm_device_t *dev = priv->dev;
|
||||
int retcode;
|
||||
|
||||
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, (long)old_encode_dev(dev->device));
|
||||
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
|
||||
(long)old_encode_dev(dev->device));
|
||||
retcode = fasync_helper(fd, filp, on, &dev->buf_async);
|
||||
if (retcode < 0) return retcode;
|
||||
if (retcode < 0)
|
||||
return retcode;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fasync);
|
||||
|
@ -310,14 +315,14 @@ int drm_release( struct inode *inode, struct file *filp )
|
|||
*/
|
||||
|
||||
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
|
||||
current->pid, (long)old_encode_dev(dev->device), dev->open_count );
|
||||
current->pid, (long)old_encode_dev(dev->device),
|
||||
dev->open_count);
|
||||
|
||||
if (priv->lock_count && dev->lock.hw_lock &&
|
||||
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
|
||||
dev->lock.filp == filp) {
|
||||
DRM_DEBUG("File %p released, freeing lock for context %d\n",
|
||||
filp,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
|
||||
|
||||
if (dev->fn_tbl->release)
|
||||
dev->fn_tbl->release(dev, filp);
|
||||
|
@ -329,8 +334,8 @@ int drm_release( struct inode *inode, struct file *filp )
|
|||
hardware at this point, possibly
|
||||
processed via a callback to the X
|
||||
server. */
|
||||
}
|
||||
else if ( dev->fn_tbl->release && priv->lock_count && dev->lock.hw_lock ) {
|
||||
} else if (dev->fn_tbl->release && priv->lock_count
|
||||
&& dev->lock.hw_lock) {
|
||||
/* The lock is required to reclaim buffers */
|
||||
DECLARE_WAITQUEUE(entry, current);
|
||||
|
||||
|
@ -366,8 +371,7 @@ int drm_release( struct inode *inode, struct file *filp )
|
|||
}
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
{
|
||||
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
|
||||
dev->fn_tbl->reclaim_buffers(filp);
|
||||
}
|
||||
|
||||
|
@ -381,7 +385,8 @@ int drm_release( struct inode *inode, struct file *filp )
|
|||
if (pos->tag == priv &&
|
||||
pos->handle != DRM_KERNEL_CONTEXT) {
|
||||
if (dev->fn_tbl->context_dtor)
|
||||
dev->fn_tbl->context_dtor(dev, pos->handle);
|
||||
dev->fn_tbl->context_dtor(dev,
|
||||
pos->handle);
|
||||
|
||||
drm_ctxbitmap_free(dev, pos->handle);
|
||||
|
||||
|
@ -426,8 +431,7 @@ int drm_release( struct inode *inode, struct file *filp )
|
|||
if (!--dev->open_count) {
|
||||
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
|
||||
DRM_ERROR("Device busy: %d %d\n",
|
||||
atomic_read( &dev->ioctl_count ),
|
||||
dev->blocked );
|
||||
atomic_read(&dev->ioctl_count), dev->blocked);
|
||||
spin_unlock(&dev->count_lock);
|
||||
unlock_kernel();
|
||||
return -EBUSY;
|
||||
|
|
|
@ -55,9 +55,13 @@ static void drm_parse_option(char *s)
|
|||
char *c, *r;
|
||||
|
||||
DRM_DEBUG("\"%s\"\n", s);
|
||||
if (!s || !*s) return;
|
||||
if (!s || !*s)
|
||||
return;
|
||||
for (c = s; *c && *c != ':'; c++) ; /* find : or \0 */
|
||||
if (*c) r = c + 1; else r = NULL; /* remember remainder */
|
||||
if (*c)
|
||||
r = c + 1;
|
||||
else
|
||||
r = NULL; /* remember remainder */
|
||||
*c = '\0'; /* terminate */
|
||||
if (!strcmp(s, "debug")) {
|
||||
drm_flags |= DRM_FLAG_DEBUG;
|
||||
|
@ -102,11 +106,15 @@ void drm_parse_options(char *s)
|
|||
char *h, *t, *n;
|
||||
|
||||
DRM_DEBUG("\"%s\"\n", s ? : "");
|
||||
if (!s || !*s) return;
|
||||
if (!s || !*s)
|
||||
return;
|
||||
|
||||
for (h = t = n = s; h && *h; h = n) {
|
||||
for (; *t && *t != ';'; t++) ; /* find ; or \0 */
|
||||
if (*t) n = t + 1; else n = NULL; /* remember next */
|
||||
if (*t)
|
||||
n = t + 1;
|
||||
else
|
||||
n = NULL; /* remember next */
|
||||
*t = '\0'; /* terminate */
|
||||
drm_parse_option(h); /* parse */
|
||||
}
|
||||
|
@ -120,7 +128,8 @@ void drm_parse_options(char *s)
|
|||
int drm_cpu_valid(void)
|
||||
{
|
||||
#if defined(__i386__)
|
||||
if (boot_cpu_data.x86 == 3) return 0; /* No cmpxchg on a 386 */
|
||||
if (boot_cpu_data.x86 == 3)
|
||||
return 0; /* No cmpxchg on a 386 */
|
||||
#endif
|
||||
#if defined(__sparc__) && !defined(__sparc_v9__)
|
||||
return 0; /* No cmpxchg before v9 sparc. */
|
||||
|
|
|
@ -91,16 +91,19 @@ int drm_setunique(struct inode *inode, struct file *filp,
|
|||
drm_unique_t u;
|
||||
int domain, bus, slot, func, ret;
|
||||
|
||||
if (dev->unique_len || dev->unique) return -EBUSY;
|
||||
if (dev->unique_len || dev->unique)
|
||||
return -EBUSY;
|
||||
|
||||
if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!u.unique_len || u.unique_len > 1024) return -EINVAL;
|
||||
if (!u.unique_len || u.unique_len > 1024)
|
||||
return -EINVAL;
|
||||
|
||||
dev->unique_len = u.unique_len;
|
||||
dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
|
||||
if(!dev->unique) return -ENOMEM;
|
||||
if (!dev->unique)
|
||||
return -ENOMEM;
|
||||
if (copy_from_user(dev->unique, u.unique, dev->unique_len))
|
||||
return -EFAULT;
|
||||
|
||||
|
@ -124,15 +127,13 @@ int drm_setunique(struct inode *inode, struct file *filp,
|
|||
|
||||
if ((domain != dev->pci_domain) ||
|
||||
(bus != dev->pci_bus) ||
|
||||
(slot != dev->pci_slot) ||
|
||||
(func != dev->pci_func))
|
||||
(slot != dev->pci_slot) || (func != dev->pci_func))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
drm_set_busid(drm_device_t *dev)
|
||||
static int drm_set_busid(drm_device_t * dev)
|
||||
{
|
||||
if (dev->unique != NULL)
|
||||
return EBUSY;
|
||||
|
@ -155,7 +156,6 @@ drm_set_busid(drm_device_t *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get a mapping information.
|
||||
*
|
||||
|
@ -212,7 +212,8 @@ int drm_getmap( struct inode *inode, struct file *filp,
|
|||
map.mtrr = r_list->map->mtrr;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (copy_to_user(argp, &map, sizeof(map))) return -EFAULT;
|
||||
if (copy_to_user(argp, &map, sizeof(map)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -244,8 +245,7 @@ int drm_getclient( struct inode *inode, struct file *filp,
|
|||
return -EFAULT;
|
||||
idx = client.idx;
|
||||
down(&dev->struct_sem);
|
||||
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next)
|
||||
;
|
||||
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
|
||||
|
||||
if (!pt) {
|
||||
up(&dev->struct_sem);
|
||||
|
@ -288,8 +288,7 @@ int drm_getstats( struct inode *inode, struct file *filp,
|
|||
for (i = 0; i < dev->counters; i++) {
|
||||
if (dev->types[i] == _DRM_STAT_LOCK)
|
||||
stats.data[i].value
|
||||
= (dev->lock.hw_lock
|
||||
? dev->lock.hw_lock->lock : 0);
|
||||
= (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
|
||||
else
|
||||
stats.data[i].value = atomic_read(&dev->counts[i]);
|
||||
stats.data[i].type = dev->types[i];
|
|
@ -66,14 +66,12 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp,
|
|||
|
||||
if ((p.busnum >> 8) != dev->pci_domain ||
|
||||
(p.busnum & 0xff) != dev->pci_bus ||
|
||||
p.devnum != dev->pci_slot ||
|
||||
p.funcnum != dev->pci_func)
|
||||
p.devnum != dev->pci_slot || p.funcnum != dev->pci_func)
|
||||
return -EINVAL;
|
||||
|
||||
p.irq = dev->irq;
|
||||
|
||||
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
|
||||
p.busnum, p.devnum, p.funcnum, p.irq);
|
||||
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq);
|
||||
if (copy_to_user(argp, &p, sizeof(p)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
|
@ -288,9 +286,9 @@ int drm_wait_vblank( DRM_IOCTL_ARGS )
|
|||
list_for_each_entry(vbl_sig, &dev->vbl_sigs.head, head) {
|
||||
if (vbl_sig->sequence == vblwait.request.sequence
|
||||
&& vbl_sig->info.si_signo == vblwait.request.signal
|
||||
&& vbl_sig->task == current)
|
||||
{
|
||||
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
|
||||
&& vbl_sig->task == current) {
|
||||
spin_unlock_irqrestore(&dev->vbl_lock,
|
||||
irqflags);
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
@ -304,7 +302,9 @@ int drm_wait_vblank( DRM_IOCTL_ARGS )
|
|||
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
|
||||
if ( !( vbl_sig = drm_alloc( sizeof( drm_vbl_sig_t ), DRM_MEM_DRIVER ) ) ) {
|
||||
if (!
|
||||
(vbl_sig =
|
||||
drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -321,7 +321,9 @@ int drm_wait_vblank( DRM_IOCTL_ARGS )
|
|||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
} else {
|
||||
if (dev->fn_tbl->vblank_wait)
|
||||
ret = dev->fn_tbl->vblank_wait( dev, &vblwait.request.sequence );
|
||||
ret =
|
||||
dev->fn_tbl->vblank_wait(dev,
|
||||
&vblwait.request.sequence);
|
||||
|
||||
do_gettimeofday(&now);
|
||||
vblwait.reply.tval_sec = now.tv_sec;
|
||||
|
@ -356,7 +358,8 @@ void drm_vbl_send_signals( drm_device_t *dev )
|
|||
vbl_sig = list_entry(list, drm_vbl_sig_t, head);
|
||||
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
|
||||
vbl_sig->info.si_code = vbl_seq;
|
||||
send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
|
||||
send_sig_info(vbl_sig->info.si_signo, &vbl_sig->info,
|
||||
vbl_sig->task);
|
||||
|
||||
list_del(list);
|
||||
|
||||
|
@ -369,5 +372,3 @@ void drm_vbl_send_signals( drm_device_t *dev )
|
|||
spin_unlock_irqrestore(&dev->vbl_lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_vbl_send_signals);
|
|
@ -35,7 +35,6 @@
|
|||
|
||||
#include "drmP.h"
|
||||
|
||||
|
||||
/**
|
||||
* Lock ioctl.
|
||||
*
|
||||
|
@ -83,8 +82,7 @@ int drm_lock( struct inode *inode, struct file *filp,
|
|||
ret = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( drm_lock_take( &dev->lock.hw_lock->lock,
|
||||
lock.context ) ) {
|
||||
if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
|
||||
dev->lock.filp = filp;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
|
||||
|
@ -108,8 +106,7 @@ int drm_lock( struct inode *inode, struct file *filp,
|
|||
sigaddset(&dev->sigmask, SIGTTOU);
|
||||
dev->sigdata.context = lock.context;
|
||||
dev->sigdata.lock = dev->lock.hw_lock;
|
||||
block_all_signals( drm_notifier,
|
||||
&dev->sigdata, &dev->sigmask );
|
||||
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
|
||||
|
||||
if (dev->fn_tbl->dma_ready && (lock.flags & _DRM_LOCK_READY))
|
||||
dev->fn_tbl->dma_ready(dev);
|
||||
|
@ -117,13 +114,12 @@ int drm_lock( struct inode *inode, struct file *filp,
|
|||
if (dev->fn_tbl->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT))
|
||||
return dev->fn_tbl->dma_quiescent(dev);
|
||||
|
||||
|
||||
if ( dev->fn_tbl->kernel_context_switch && dev->last_context != lock.context ) {
|
||||
if (dev->fn_tbl->kernel_context_switch
|
||||
&& dev->last_context != lock.context) {
|
||||
dev->fn_tbl->kernel_context_switch(dev, dev->last_context,
|
||||
lock.context);
|
||||
}
|
||||
|
||||
|
||||
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
|
||||
|
||||
return ret;
|
||||
|
@ -160,8 +156,7 @@ int drm_unlock( struct inode *inode, struct file *filp,
|
|||
|
||||
if (dev->fn_tbl->kernel_context_switch_unlock)
|
||||
dev->fn_tbl->kernel_context_switch_unlock(dev);
|
||||
else
|
||||
{
|
||||
else {
|
||||
drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT);
|
||||
|
||||
|
@ -190,8 +185,10 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
|
|||
|
||||
do {
|
||||
old = *lock;
|
||||
if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
|
||||
else new = context | _DRM_LOCK_HELD;
|
||||
if (old & _DRM_LOCK_HELD)
|
||||
new = old | _DRM_LOCK_CONT;
|
||||
else
|
||||
new = context | _DRM_LOCK_HELD;
|
||||
prev = cmpxchg(lock, old, new);
|
||||
} while (prev != old);
|
||||
if (_DRM_LOCKING_CONTEXT(old) == context) {
|
||||
|
@ -260,8 +257,7 @@ int drm_lock_free(drm_device_t *dev,
|
|||
} while (prev != old);
|
||||
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
|
||||
DRM_ERROR("%d freed heavyweight lock held by %d\n",
|
||||
context,
|
||||
_DRM_LOCKING_CONTEXT(old));
|
||||
context, _DRM_LOCKING_CONTEXT(old));
|
||||
return 1;
|
||||
}
|
||||
wake_up_interruptible(&dev->lock.lock_queue);
|
||||
|
@ -284,10 +280,10 @@ int drm_notifier(void *priv)
|
|||
drm_sigdata_t *s = (drm_sigdata_t *) priv;
|
||||
unsigned int old, new, prev;
|
||||
|
||||
|
||||
/* Allow signal delivery if lock isn't held */
|
||||
if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
|
||||
|| _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1;
|
||||
|| _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
|
||||
return 1;
|
||||
|
||||
/* Otherwise, set flag to force call to
|
||||
drmUnlock */
|
|
@ -83,7 +83,8 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
|
|||
{
|
||||
void *pt;
|
||||
|
||||
if (!(pt = kmalloc(size, GFP_KERNEL))) return NULL;
|
||||
if (!(pt = kmalloc(size, GFP_KERNEL)))
|
||||
return NULL;
|
||||
if (oldpt && oldsize) {
|
||||
memcpy(pt, oldpt, oldsize);
|
||||
kfree(oldpt);
|
||||
|
@ -116,8 +117,7 @@ unsigned long drm_alloc_pages(int order, int area)
|
|||
|
||||
/* Reserve */
|
||||
for (addr = address, sz = bytes;
|
||||
sz > 0;
|
||||
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
SetPageReserved(virt_to_page(addr));
|
||||
}
|
||||
|
||||
|
@ -144,15 +144,13 @@ void drm_free_pages(unsigned long address, int order, int area)
|
|||
|
||||
/* Unreserve */
|
||||
for (addr = address, sz = bytes;
|
||||
sz > 0;
|
||||
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
ClearPageReserved(virt_to_page(addr));
|
||||
}
|
||||
|
||||
free_pages(address, order);
|
||||
}
|
||||
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
/** Wrapper around agp_allocate_memory() */
|
||||
DRM_AGP_MEM *drm_alloc_agp(int pages, u32 type)
|
|
@ -72,8 +72,8 @@
/*
* Find the drm_map that covers the range [offset, offset+size).
*/
static inline drm_map_t *
drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
static inline drm_map_t *drm_lookup_map(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
struct list_head *list;
drm_map_list_t *r_list;
||||
|
@ -84,16 +84,18 @@ drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
|
|||
map = r_list->map;
|
||||
if (!map)
|
||||
continue;
|
||||
if (map->offset <= offset && (offset + size) <= (map->offset + map->size))
|
||||
if (map->offset <= offset
|
||||
&& (offset + size) <= (map->offset + map->size))
|
||||
return map;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void *
|
||||
agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
|
||||
static inline void *agp_remap(unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
|
||||
unsigned long *phys_addr_map, i, num_pages =
|
||||
PAGE_ALIGN(size) / PAGE_SIZE;
|
||||
struct drm_agp_mem *agpmem;
|
||||
struct page **page_map;
|
||||
void *addr;
|
||||
|
@ -106,7 +108,8 @@ agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
|
|||
|
||||
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
|
||||
if (agpmem->bound <= offset
|
||||
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size))
|
||||
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
|
||||
(offset + size))
|
||||
break;
|
||||
if (!agpmem)
|
||||
return NULL;
|
||||
|
@ -121,7 +124,8 @@ agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
|
|||
if (!page_map)
|
||||
return NULL;
|
||||
|
||||
phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
|
||||
phys_addr_map =
|
||||
agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
|
||||
for (i = 0; i < num_pages; ++i)
|
||||
page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
|
||||
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
|
||||
|
@ -130,8 +134,7 @@ agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
|
|||
return addr;
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
drm_follow_page (void *vaddr)
|
||||
static inline unsigned long drm_follow_page(void *vaddr)
|
||||
{
|
||||
pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
|
||||
pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr);
|
||||
|
@ -141,12 +144,14 @@ drm_follow_page (void *vaddr)
|
|||
|
||||
#else /* __OS_HAS_AGP */
|
||||
|
||||
static inline drm_map_t *drm_lookup_map(unsigned long offset, unsigned long size, drm_device_t *dev)
|
||||
static inline drm_map_t *drm_lookup_map(unsigned long offset,
|
||||
unsigned long size, drm_device_t * dev)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void *agp_remap(unsigned long offset, unsigned long size, drm_device_t *dev)
|
||||
static inline void *agp_remap(unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
@ -158,7 +163,8 @@ static inline unsigned long drm_follow_page (void *vaddr)
|
|||
|
||||
#endif
|
||||
|
||||
static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
|
||||
static inline void *drm_ioremap(unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
#if defined(VMAP_4_ARGS)
|
||||
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
|
||||
|
@ -172,8 +178,8 @@ static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_de
|
|||
return ioremap(offset, size);
|
||||
}
|
||||
|
||||
static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
|
||||
drm_device_t *dev)
|
||||
static inline void *drm_ioremap_nocache(unsigned long offset,
|
||||
unsigned long size, drm_device_t * dev)
|
||||
{
|
||||
#if defined(VMAP_4_ARGS)
|
||||
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
|
||||
|
@ -187,7 +193,8 @@ static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size
|
|||
return ioremap_nocache(offset, size);
|
||||
}
|
||||
|
||||
static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
|
||||
static inline void drm_ioremapfree(void *pt, unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
#if defined(VMAP_4_ARGS)
|
||||
/*
|
||||
|
@ -196,8 +203,8 @@ static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *d
|
|||
* a future revision of the interface...
|
||||
*/
|
||||
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
|
||||
&& ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END))
|
||||
{
|
||||
&& ((unsigned long)pt >= VMALLOC_START
|
||||
&& (unsigned long)pt < VMALLOC_END)) {
|
||||
unsigned long offset;
|
||||
drm_map_t *map;
|
||||
|
||||
|
@ -209,6 +216,5 @@ static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *d
|
|||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
iounmap(pt);
|
||||
}
|
|
@ -129,7 +129,8 @@ static int drm__mem_info(char *buf, char **start, off_t offset,
|
|||
- (long)pt->bytes_freed);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
if (len > request + offset)
|
||||
return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
@ -182,7 +183,8 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
|
|||
{
|
||||
void *pt;
|
||||
|
||||
if (!(pt = drm_alloc(size, area))) return NULL;
|
||||
if (!(pt = drm_alloc(size, area)))
|
||||
return NULL;
|
||||
if (oldpt && oldsize) {
|
||||
memcpy(pt, oldpt, oldsize);
|
||||
drm_free(oldpt, oldsize, area);
|
||||
|
@ -195,8 +197,10 @@ void drm_free(void *pt, size_t size, int area)
|
|||
int alloc_count;
|
||||
int free_count;
|
||||
|
||||
if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
|
||||
else kfree(pt);
|
||||
if (!pt)
|
||||
DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
|
||||
else
|
||||
kfree(pt);
|
||||
spin_lock(&drm_mem_lock);
|
||||
drm_mem_stats[area].bytes_freed += size;
|
||||
free_count = ++drm_mem_stats[area].free_count;
|
||||
|
@ -236,14 +240,12 @@ unsigned long drm_alloc_pages(int order, int area)
|
|||
drm_ram_used += bytes;
|
||||
spin_unlock(&drm_mem_lock);
|
||||
|
||||
|
||||
/* Zero outside the lock */
|
||||
memset((void *)address, 0, bytes);
|
||||
|
||||
/* Reserve */
|
||||
for (addr = address, sz = bytes;
|
||||
sz > 0;
|
||||
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
SetPageReserved(virt_to_page(addr));
|
||||
}
|
||||
|
||||
|
@ -263,8 +265,7 @@ void drm_free_pages(unsigned long address, int order, int area)
|
|||
} else {
|
||||
/* Unreserve */
|
||||
for (addr = address, sz = bytes;
|
||||
sz > 0;
|
||||
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
ClearPageReserved(virt_to_page(addr));
|
||||
}
|
||||
free_pages(address, order);
|
||||
|
@ -306,7 +307,8 @@ void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
|
|||
return pt;
|
||||
}
|
||||
|
||||
void *drm_ioremap_nocache(unsigned long offset, unsigned long size, drm_device_t *dev)
|
||||
void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
void *pt;
|
||||
|
||||
|
@ -442,7 +444,8 @@ int drm_unbind_agp(DRM_AGP_MEM *handle)
|
|||
return retcode;
|
||||
}
|
||||
|
||||
if ((retcode = drm_agp_unbind_memory(handle))) return retcode;
|
||||
if ((retcode = drm_agp_unbind_memory(handle)))
|
||||
return retcode;
|
||||
spin_lock(&drm_mem_lock);
|
||||
free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
|
||||
alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
|
||||
|
@ -46,7 +46,8 @@ typedef struct drm_mem_stats {
|
|||
static spinlock_t DRM(mem_lock) = SPIN_LOCK_UNLOCKED;
|
||||
static unsigned long DRM(ram_available) = 0; /* In pages */
|
||||
static unsigned long DRM(ram_used) = 0;
|
||||
static drm_mem_stats_t DRM(mem_stats)[] = {
|
||||
static drm_mem_stats_t DRM(mem_stats)[] =
|
||||
{
|
||||
[DRM_MEM_DMA] = {"dmabufs"},
|
||||
[DRM_MEM_SAREA] = {"sareas"},
|
||||
[DRM_MEM_DRIVER] = {"driver"},
|
||||
|
@ -72,8 +73,7 @@ static drm_mem_stats_t DRM(mem_stats)[] = {
|
|||
{NULL, 0,} /* Last entry must be null */
|
||||
};
|
||||
|
||||
void DRM(mem_init)(void)
|
||||
{
|
||||
void DRM(mem_init) (void) {
|
||||
drm_mem_stats_t *mem;
|
||||
struct sysinfo si;
|
||||
|
||||
|
@ -93,8 +93,7 @@ void DRM(mem_init)(void)
|
|||
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
|
||||
|
||||
static int DRM(_mem_info) (char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data)
|
||||
{
|
||||
int request, int *eof, void *data) {
|
||||
drm_mem_stats_t *pt;
|
||||
int len = 0;
|
||||
|
||||
|
@ -129,14 +128,14 @@ static int DRM(_mem_info)(char *buf, char **start, off_t offset,
|
|||
- (long)pt->bytes_freed);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
if (len > request + offset)
|
||||
return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
int DRM(mem_info) (char *buf, char **start, off_t offset,
|
||||
int len, int *eof, void *data)
|
||||
{
|
||||
int len, int *eof, void *data) {
|
||||
int ret;
|
||||
|
||||
spin_lock(&DRM(mem_lock));
|
||||
|
@ -145,8 +144,7 @@ int DRM(mem_info)(char *buf, char **start, off_t offset,
|
|||
return ret;
|
||||
}
|
||||
|
||||
void *DRM(alloc)(size_t size, int area)
|
||||
{
|
||||
void *DRM(alloc) (size_t size, int area) {
|
||||
void *pt;
|
||||
|
||||
if (!size) {
|
||||
|
@ -167,8 +165,7 @@ void *DRM(alloc)(size_t size, int area)
|
|||
return pt;
|
||||
}
|
||||
|
||||
void *DRM(calloc)(size_t nmemb, size_t size, int area)
|
||||
{
|
||||
void *DRM(calloc) (size_t nmemb, size_t size, int area) {
|
||||
void *addr;
|
||||
|
||||
addr = DRM(alloc) (nmemb * size, area);
|
||||
|
@ -178,11 +175,11 @@ void *DRM(calloc)(size_t nmemb, size_t size, int area)
|
|||
return addr;
|
||||
}
|
||||
|
||||
void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
|
||||
{
|
||||
void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) {
|
||||
void *pt;
|
||||
|
||||
if (!(pt = DRM(alloc)(size, area))) return NULL;
|
||||
if (!(pt = DRM(alloc) (size, area)))
|
||||
return NULL;
|
||||
if (oldpt && oldsize) {
|
||||
memcpy(pt, oldpt, oldsize);
|
||||
DRM(free) (oldpt, oldsize, area);
|
||||
|
@ -190,13 +187,14 @@ void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
|
|||
return pt;
|
||||
}
|
||||
|
||||
void DRM(free)(void *pt, size_t size, int area)
|
||||
{
|
||||
void DRM(free) (void *pt, size_t size, int area) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
|
||||
if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
|
||||
else kfree(pt);
|
||||
if (!pt)
|
||||
DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
|
||||
else
|
||||
kfree(pt);
|
||||
spin_lock(&DRM(mem_lock));
|
||||
DRM(mem_stats)[area].bytes_freed += size;
|
||||
free_count = ++DRM(mem_stats)[area].free_count;
|
||||
|
@ -208,8 +206,7 @@ void DRM(free)(void *pt, size_t size, int area)
|
|||
}
|
||||
}
|
||||
|
||||
unsigned long DRM(alloc_pages)(int order, int area)
|
||||
{
|
||||
unsigned long DRM(alloc_pages) (int order, int area) {
|
||||
unsigned long address;
|
||||
unsigned long bytes = PAGE_SIZE << order;
|
||||
unsigned long addr;
|
||||
|
@ -236,22 +233,19 @@ unsigned long DRM(alloc_pages)(int order, int area)
|
|||
DRM(ram_used) += bytes;
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
|
||||
|
||||
/* Zero outside the lock */
|
||||
memset((void *)address, 0, bytes);
|
||||
|
||||
/* Reserve */
|
||||
for (addr = address, sz = bytes;
|
||||
sz > 0;
|
||||
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
SetPageReserved(virt_to_page(addr));
|
||||
}
|
||||
|
||||
return address;
|
||||
}
|
||||
|
||||
void DRM(free_pages)(unsigned long address, int order, int area)
|
||||
{
|
||||
void DRM(free_pages) (unsigned long address, int order, int area) {
|
||||
unsigned long bytes = PAGE_SIZE << order;
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
|
@ -263,8 +257,7 @@ void DRM(free_pages)(unsigned long address, int order, int area)
|
|||
} else {
|
||||
/* Unreserve */
|
||||
for (addr = address, sz = bytes;
|
||||
sz > 0;
|
||||
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
ClearPageReserved(virt_to_page(addr));
|
||||
}
|
||||
free_pages(address, order);
|
||||
|
@ -283,8 +276,8 @@ void DRM(free_pages)(unsigned long address, int order, int area)
|
|||
}
|
||||
}
|
||||
|
||||
void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
|
||||
{
|
||||
void *DRM(ioremap) (unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev) {
|
||||
void *pt;
|
||||
|
||||
if (!size) {
|
||||
|
@ -306,8 +299,8 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
|
|||
return pt;
|
||||
}
|
||||
|
||||
void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
|
||||
{
|
||||
void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
|
||||
drm_device_t * dev) {
|
||||
void *pt;
|
||||
|
||||
if (!size) {
|
||||
|
@ -329,8 +322,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_
|
|||
return pt;
|
||||
}
|
||||
|
||||
void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
|
||||
{
|
||||
void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
|
||||
|
@ -354,8 +346,7 @@ void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
|
|||
|
||||
#if __OS_HAS_AGP
|
||||
|
||||
DRM_AGP_MEM *DRM(alloc_agp)(int pages, u32 type)
|
||||
{
|
||||
DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
|
||||
DRM_AGP_MEM *handle;
|
||||
|
||||
if (!pages) {
|
||||
|
@ -377,8 +368,7 @@ DRM_AGP_MEM *DRM(alloc_agp)(int pages, u32 type)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
int DRM(free_agp)(DRM_AGP_MEM *handle, int pages)
|
||||
{
|
||||
int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
int retval = -EINVAL;
|
||||
|
@ -406,8 +396,7 @@ int DRM(free_agp)(DRM_AGP_MEM *handle, int pages)
|
|||
return retval;
|
||||
}
|
||||
|
||||
int DRM(bind_agp)(DRM_AGP_MEM *handle, unsigned int start)
|
||||
{
|
||||
int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
|
||||
int retcode = -EINVAL;
|
||||
|
||||
if (!handle) {
|
||||
|
@ -430,8 +419,7 @@ int DRM(bind_agp)(DRM_AGP_MEM *handle, unsigned int start)
|
|||
return retcode;
|
||||
}
|
||||
|
||||
int DRM(unbind_agp)(DRM_AGP_MEM *handle)
|
||||
{
|
||||
int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
int retcode = -EINVAL;
|
||||
|
@ -442,7 +430,8 @@ int DRM(unbind_agp)(DRM_AGP_MEM *handle)
|
|||
return retcode;
|
||||
}
|
||||
|
||||
if ((retcode = DRM(agp_unbind_memory)(handle))) return retcode;
|
||||
if ((retcode = DRM(agp_unbind_memory) (handle)))
|
||||
return retcode;
|
||||
spin_lock(&DRM(mem_lock));
|
||||
free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
|
||||
alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
|
|
@ -3,7 +3,6 @@
|
|||
* OS abstraction macros.
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/interrupt.h> /* For task queue support */
|
||||
#include <linux/delay.h>
|
||||
|
||||
|
@ -87,11 +86,11 @@ static __inline__ int mtrr_add (unsigned long base, unsigned long size,
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
static __inline__ int mtrr_del (int reg, unsigned long base,
|
||||
unsigned long size)
|
||||
static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
#define MTRR_TYPE_WRCOMB 1
|
||||
#endif
|
||||
|
||||
|
@ -124,7 +123,6 @@ static __inline__ int mtrr_del (int reg, unsigned long base,
|
|||
#define DRM_PUT_USER_UNCHECKED(uaddr, val) \
|
||||
__put_user(val, uaddr)
|
||||
|
||||
|
||||
#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
|
||||
|
||||
/**
|
||||
|
@ -171,7 +169,5 @@ do { \
|
|||
remove_wait_queue(&(queue), &entry); \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
|
||||
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
|
|
@ -9,7 +9,7 @@
* \todo Add support to map these buffers.
* \todo The wrappers here are so thin that they would be better off inlined..
*
* \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
||||
|
@ -37,7 +37,6 @@
|
|||
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include "drmP.h"
|
||||
|
||||
|
@ -45,12 +44,10 @@
|
|||
/** \name PCI memory */
|
||||
/*@{*/
|
||||
|
||||
|
||||
/**
|
||||
* \brief Allocate a PCI consistent memory block, for DMA.
|
||||
*/
|
||||
void *
|
||||
drm_pci_alloc(drm_device_t *dev, size_t size, size_t align,
|
||||
void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
|
||||
dma_addr_t maxaddr, dma_addr_t * busaddr)
|
||||
{
|
||||
void *address;
|
||||
|
@ -108,8 +105,7 @@ drm_pci_alloc(drm_device_t *dev, size_t size, size_t align,
|
|||
/* XXX - Is virt_to_page() legal for consistent mem? */
|
||||
/* Reserve */
|
||||
for (addr = (unsigned long)address, sz = size;
|
||||
sz > 0;
|
||||
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
SetPageReserved(virt_to_page(addr));
|
||||
}
|
||||
#endif
|
||||
|
@ -143,8 +139,7 @@ drm_pci_free(drm_device_t *dev, size_t size, void *vaddr, dma_addr_t busaddr)
|
|||
/* XXX - Is virt_to_page() legal for consistent mem? */
|
||||
/* Unreserve */
|
||||
for (addr = (unsigned long)vaddr, sz = size;
|
||||
sz > 0;
|
||||
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
ClearPageReserved(virt_to_page(addr));
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -71,6 +71,7 @@ struct drm_proc_list {
|
|||
{"vma", drm_vma_info},
|
||||
#endif
|
||||
};
|
||||
|
||||
#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
|
||||
|
||||
/**
|
||||
|
@ -87,8 +88,7 @@ struct drm_proc_list {
|
|||
* "/proc/dri/%minor%/%name%".
|
||||
*/
|
||||
int drm_proc_init(drm_device_t * dev, int minor,
|
||||
struct proc_dir_entry *root,
|
||||
struct proc_dir_entry **dev_root)
|
||||
struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
|
||||
{
|
||||
struct proc_dir_entry *ent;
|
||||
int i, j;
|
||||
|
@ -119,7 +119,6 @@ int drm_proc_init(drm_device_t *dev, int minor,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Cleanup the proc filesystem resources.
|
||||
*
|
||||
|
@ -136,7 +135,8 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
|
|||
int i;
|
||||
char name[64];
|
||||
|
||||
if (!root || !dev_root) return 0;
|
||||
if (!root || !dev_root)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < DRM_PROC_ENTRIES; i++)
|
||||
remove_proc_entry(drm_proc_list[i].name, dev_root);
|
||||
|
@ -175,12 +175,15 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
|
|||
|
||||
if (dev->unique) {
|
||||
DRM_PROC_PRINT("%s 0x%lx %s\n",
|
||||
dev->name, (long)old_encode_dev(dev->device), dev->unique);
|
||||
dev->name, (long)old_encode_dev(dev->device),
|
||||
dev->unique);
|
||||
} else {
|
||||
DRM_PROC_PRINT("%s 0x%lx\n", dev->name, (long)old_encode_dev(dev->device));
|
||||
DRM_PROC_PRINT("%s 0x%lx\n", dev->name,
|
||||
(long)old_encode_dev(dev->device));
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
if (len > request + offset)
|
||||
return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
@ -225,19 +228,21 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
|
|||
DRM_PROC_PRINT("slot offset size type flags "
|
||||
"address mtrr\n\n");
|
||||
i = 0;
|
||||
if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
|
||||
if (dev->maplist != NULL)
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = list_entry(list, drm_map_list_t, head);
|
||||
map = r_list->map;
|
||||
if(!map) continue;
|
||||
if (map->type < 0 || map->type > 4) type = "??";
|
||||
else type = types[map->type];
|
||||
if (!map)
|
||||
continue;
|
||||
if (map->type < 0 || map->type > 4)
|
||||
type = "??";
|
||||
else
|
||||
type = types[map->type];
|
||||
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
|
||||
i,
|
||||
map->offset,
|
||||
map->size,
|
||||
type,
|
||||
map->flags,
|
||||
(unsigned long)map->handle);
|
||||
type, map->flags, (unsigned long)map->handle);
|
||||
if (map->mtrr < 0) {
|
||||
DRM_PROC_PRINT("none\n");
|
||||
} else {
|
||||
|
@ -246,7 +251,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
|
|||
i++;
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
if (len > request + offset)
|
||||
return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
@ -310,13 +316,16 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
|
|||
atomic_read(&q->block_read) ? 'r' : '-',
|
||||
atomic_read(&q->block_write) ? 'w' : '-',
|
||||
waitqueue_active(&q->read_queue) ? 'r' : '-',
|
||||
waitqueue_active(&q->write_queue) ? 'w':'-',
|
||||
waitqueue_active(&q->flush_queue) ? 'f':'-',
|
||||
waitqueue_active(&q->
|
||||
write_queue) ? 'w' : '-',
|
||||
waitqueue_active(&q->
|
||||
flush_queue) ? 'f' : '-',
|
||||
DRM_BUFCOUNT(&q->waitlist));
|
||||
atomic_dec(&q->use_count);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
if (len > request + offset)
|
||||
return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
@ -381,12 +390,14 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
|
|||
}
|
||||
DRM_PROC_PRINT("\n");
|
||||
for (i = 0; i < dma->buf_count; i++) {
|
||||
if (i && !(i%32)) DRM_PROC_PRINT("\n");
|
||||
if (i && !(i % 32))
|
||||
DRM_PROC_PRINT("\n");
|
||||
DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
|
||||
}
|
||||
DRM_PROC_PRINT("\n");
|
||||
|
||||
if (len > request + offset) return request;
|
||||
if (len > request + offset)
|
||||
return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
@ -438,12 +449,11 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
|
|||
priv->authenticated ? 'y' : 'n',
|
||||
priv->minor,
|
||||
priv->pid,
|
||||
priv->uid,
|
||||
priv->magic,
|
||||
priv->ioctl_count);
|
||||
priv->uid, priv->magic, priv->ioctl_count);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
if (len > request + offset)
|
||||
return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
@ -488,7 +498,8 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
|
|||
atomic_read(&dev->vma_count),
|
||||
high_memory, virt_to_phys(high_memory));
|
||||
for (pt = dev->vmalist; pt; pt = pt->next) {
|
||||
if (!(vma = pt->vma)) continue;
|
||||
if (!(vma = pt->vma))
|
||||
continue;
|
||||
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
|
||||
pt->pid,
|
||||
vma->vm_start,
|
||||
|
@ -517,7 +528,8 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
|
|||
DRM_PROC_PRINT("\n");
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
if (len > request + offset)
|
||||
return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
@ -534,5 +546,3 @@ static int drm_vma_info(char *buf, char **start, off_t offset, int request,
|
|||
return ret;
|
||||
}
|
||||
#endif
|
|
@ -33,6 +33,7 @@
|
|||
|
||||
#include <linux/config.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include "drmP.h"
|
||||
|
||||
#define DEBUG_SCATTER 0
|
||||
|
@ -51,14 +52,10 @@ void drm_sg_cleanup( drm_sg_mem_t *entry )
|
|||
vfree(entry->virtual);
|
||||
|
||||
drm_free(entry->busaddr,
|
||||
entry->pages * sizeof(*entry->busaddr),
|
||||
DRM_MEM_PAGES );
|
||||
entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
|
||||
drm_free(entry->pagelist,
|
||||
entry->pages * sizeof(*entry->pagelist),
|
||||
DRM_MEM_PAGES );
|
||||
drm_free( entry,
|
||||
sizeof(*entry),
|
||||
DRM_MEM_SGLISTS );
|
||||
entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
|
||||
}
|
||||
|
||||
int drm_sg_alloc(struct inode *inode, struct file *filp,
|
||||
|
@ -107,9 +104,7 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
|
|||
drm_free(entry->pagelist,
|
||||
entry->pages * sizeof(*entry->pagelist),
|
||||
DRM_MEM_PAGES);
|
||||
drm_free( entry,
|
||||
sizeof(*entry),
|
||||
DRM_MEM_SGLISTS );
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
|
||||
|
@ -117,14 +112,11 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
|
|||
entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
|
||||
if (!entry->virtual) {
|
||||
drm_free(entry->busaddr,
|
||||
entry->pages * sizeof(*entry->busaddr),
|
||||
DRM_MEM_PAGES );
|
||||
entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
|
||||
drm_free(entry->pagelist,
|
||||
entry->pages * sizeof(*entry->pagelist),
|
||||
DRM_MEM_PAGES);
|
||||
drm_free( entry,
|
||||
sizeof(*entry),
|
||||
DRM_MEM_SGLISTS );
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -32,6 +32,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm_core.h"
|
||||
|
||||
|
@ -51,7 +52,9 @@ drm_minor_t *drm_minors;
|
|||
struct drm_sysfs_class *drm_class;
|
||||
struct proc_dir_entry *drm_proc_root;
|
||||
|
||||
static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver_fn *driver_fn)
|
||||
static int fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent,
|
||||
struct drm_driver_fn *driver_fn)
|
||||
{
|
||||
int retcode;
|
||||
|
||||
|
@ -75,7 +78,8 @@ static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci
|
|||
dev->irq = pdev->irq;
|
||||
|
||||
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
|
||||
if(dev->maplist == NULL) return -ENOMEM;
|
||||
if (dev->maplist == NULL)
|
||||
return -ENOMEM;
|
||||
INIT_LIST_HEAD(&dev->maplist->head);
|
||||
|
||||
/* the DRM has 6 counters */
|
||||
|
@ -95,19 +99,19 @@ static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci
|
|||
|
||||
if (drm_core_has_AGP(dev)) {
|
||||
dev->agp = drm_agp_init();
|
||||
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
|
||||
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
|
||||
&& (dev->agp == NULL)) {
|
||||
DRM_ERROR("Cannot initialize the agpgart module.\n");
|
||||
retcode = -EINVAL;
|
||||
goto error_out_unreg;
|
||||
}
|
||||
|
||||
|
||||
if (drm_core_has_MTRR(dev)) {
|
||||
if (dev->agp)
|
||||
dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
|
||||
dev->agp->agp_info.aper_size*1024*1024,
|
||||
MTRR_TYPE_WRCOMB,
|
||||
1 );
|
||||
dev->agp->agp_mtrr =
|
||||
mtrr_add(dev->agp->agp_info.aper_base,
|
||||
dev->agp->agp_info.aper_size *
|
||||
1024 * 1024, MTRR_TYPE_WRCOMB, 1);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -142,7 +146,8 @@ static int fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct pci
|
|||
* then register the character device and inter module information.
|
||||
* Try and register, if we fail to register, backout previous work.
|
||||
*/
|
||||
int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver_fn *driver_fn)
|
||||
int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent,
|
||||
struct drm_driver_fn *driver_fn)
|
||||
{
|
||||
struct class_device *dev_class;
|
||||
drm_device_t *dev;
|
||||
|
@ -160,14 +165,18 @@ int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_
|
|||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
||||
*minors = (drm_minor_t){.dev = dev, .class = DRM_MINOR_PRIMARY};
|
||||
*minors = (drm_minor_t) {
|
||||
.dev = dev,.class = DRM_MINOR_PRIMARY};
|
||||
dev->minor = minor;
|
||||
if ((ret = fill_in_dev(dev, pdev, ent, driver_fn))) {
|
||||
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
|
||||
goto err_g1;
|
||||
}
|
||||
if ((ret = drm_proc_init(dev, minor, drm_proc_root, &minors->dev_root))) {
|
||||
printk (KERN_ERR "DRM: Failed to initialize /proc/dri.\n");
|
||||
if ((ret =
|
||||
drm_proc_init(dev, minor, drm_proc_root,
|
||||
&minors->dev_root))) {
|
||||
printk(KERN_ERR
|
||||
"DRM: Failed to initialize /proc/dri.\n");
|
||||
goto err_g1;
|
||||
}
|
||||
if (!drm_fb_loaded) {
|
||||
|
@ -176,9 +185,13 @@ int drm_probe(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_
|
|||
pci_enable_device(pdev);
|
||||
}
|
||||
dev_class = drm_sysfs_device_add(drm_class,
|
||||
MKDEV(DRM_MAJOR, minor), DRM_PCI_DEV(pdev), "card%d", minor);
|
||||
MKDEV(DRM_MAJOR,
|
||||
minor),
|
||||
DRM_PCI_DEV(pdev),
|
||||
"card%d", minor);
|
||||
if (IS_ERR(dev_class)) {
|
||||
printk (KERN_ERR "DRM: Error sysfs_device_add.\n");
|
||||
printk(KERN_ERR
|
||||
"DRM: Error sysfs_device_add.\n");
|
||||
ret = PTR_ERR(dev_class);
|
||||
goto err_g2;
|
||||
}
|
||||
|
@ -197,7 +210,8 @@ err_g2:
|
|||
}
|
||||
drm_proc_cleanup(minor, drm_proc_root, minors->dev_root);
|
||||
err_g1:
|
||||
*minors = (drm_minor_t){.dev = NULL, .class = DRM_MINOR_FREE};
|
||||
*minors = (drm_minor_t) {
|
||||
.dev = NULL,.class = DRM_MINOR_FREE};
|
||||
drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
|
||||
return ret;
|
||||
}
|
||||
|
@ -226,16 +240,24 @@ int drm_get_secondary_minor(drm_device_t *dev, drm_minor_t **sec_minor)
|
|||
for (minor = 0; minor < cards_limit; minor++, minors++) {
|
||||
if (minors->class == DRM_MINOR_FREE) {
|
||||
|
||||
*minors = (drm_minor_t){.dev = dev, .class = DRM_MINOR_SECONDARY};
|
||||
if ((ret = drm_proc_init(dev, minor, drm_proc_root, &minors->dev_root))) {
|
||||
printk (KERN_ERR "DRM: Failed to initialize /proc/dri.\n");
|
||||
*minors = (drm_minor_t) {
|
||||
.dev = dev,.class = DRM_MINOR_SECONDARY};
|
||||
if ((ret =
|
||||
drm_proc_init(dev, minor, drm_proc_root,
|
||||
&minors->dev_root))) {
|
||||
printk(KERN_ERR
|
||||
"DRM: Failed to initialize /proc/dri.\n");
|
||||
goto err_g1;
|
||||
}
|
||||
|
||||
dev_class = drm_sysfs_device_add(drm_class,
|
||||
MKDEV(DRM_MAJOR, minor), DRM_PCI_DEV(dev->pdev), "card%d", minor);
|
||||
MKDEV(DRM_MAJOR,
|
||||
minor),
|
||||
DRM_PCI_DEV(dev->pdev),
|
||||
"card%d", minor);
|
||||
if (IS_ERR(dev_class)) {
|
||||
printk (KERN_ERR "DRM: Error sysfs_device_add.\n");
|
||||
printk(KERN_ERR
|
||||
"DRM: Error sysfs_device_add.\n");
|
||||
ret = PTR_ERR(dev_class);
|
||||
goto err_g2;
|
||||
}
|
||||
|
@ -250,7 +272,8 @@ int drm_get_secondary_minor(drm_device_t *dev, drm_minor_t **sec_minor)
|
|||
err_g2:
|
||||
drm_proc_cleanup(minor, drm_proc_root, minors->dev_root);
|
||||
err_g1:
|
||||
*minors = (drm_minor_t){.dev = NULL, .class = DRM_MINOR_FREE};
|
||||
*minors = (drm_minor_t) {
|
||||
.dev = NULL,.class = DRM_MINOR_FREE};
|
||||
drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
|
||||
return ret;
|
||||
}
|
||||
|
@ -274,7 +297,8 @@ int drm_put_minor(drm_device_t *dev)
|
|||
drm_proc_cleanup(dev->minor, drm_proc_root, minors->dev_root);
|
||||
drm_sysfs_device_remove(MKDEV(DRM_MAJOR, dev->minor));
|
||||
|
||||
*minors = (drm_minor_t){.dev = NULL, .class = DRM_MINOR_FREE};
|
||||
*minors = (drm_minor_t) {
|
||||
.dev = NULL,.class = DRM_MINOR_FREE};
|
||||
drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
|
||||
|
||||
return 0;
|
||||
|
@ -299,8 +323,8 @@ int drm_put_secondary_minor(drm_minor_t *sec_minor)
|
|||
drm_proc_cleanup(minor, drm_proc_root, sec_minor->dev_root);
|
||||
drm_sysfs_device_remove(MKDEV(DRM_MAJOR, minor));
|
||||
|
||||
*sec_minor = (drm_minor_t){.dev = NULL, .class = DRM_MINOR_FREE};
|
||||
*sec_minor = (drm_minor_t) {
|
||||
.dev = NULL,.class = DRM_MINOR_FREE};
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -58,6 +58,7 @@ static ssize_t version_show(struct class *dev, char *buf)
return sprintf(buf, "%s %d.%d.%d %s\n", DRIVER_NAME, DRIVER_MAJOR,
DRIVER_MINOR, DRIVER_PATCHLEVEL, DRIVER_DATE);
}

static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
|
||||
/**
|
||||
|
@ -134,7 +135,9 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
|
|||
* Note: the struct drm_sysfs_class passed to this function must have previously been
|
||||
* created with a call to drm_sysfs_create().
|
||||
*/
|
||||
struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev, struct device *device, const char *fmt, ...)
|
||||
struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
|
||||
struct device *device,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
struct simple_dev *s_dev = NULL;
|
||||
|
@ -203,4 +206,3 @@ void drm_sysfs_device_remove(dev_t dev)
|
|||
spin_unlock(&simple_dev_list_lock);
|
||||
}
|
||||
}
|
|
@ -35,7 +35,6 @@
|
|||
|
||||
#include "drmP.h"
|
||||
|
||||
|
||||
/**
|
||||
* \c nopage method for AGP virtual memory.
|
||||
*
|
||||
|
@ -62,13 +61,16 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
|
|||
if (!drm_core_has_AGP(dev))
|
||||
goto vm_nopage_error;
|
||||
|
||||
if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;
|
||||
if (!dev->agp || !dev->agp->cant_use_aperture)
|
||||
goto vm_nopage_error;
|
||||
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = list_entry(list, drm_map_list_t, head);
|
||||
map = r_list->map;
|
||||
if (!map) continue;
|
||||
if (map->offset == VM_OFFSET(vma)) break;
|
||||
if (!map)
|
||||
continue;
|
||||
if (map->offset == VM_OFFSET(vma))
|
||||
break;
|
||||
}
|
||||
|
||||
if (map && map->type == _DRM_AGP) {
|
||||
|
@ -93,7 +95,8 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
|
|||
break;
|
||||
}
|
||||
|
||||
if (!agpmem) goto vm_nopage_error;
|
||||
if (!agpmem)
|
||||
goto vm_nopage_error;
|
||||
|
||||
/*
|
||||
* Get the page, inc the use count, and return it
|
||||
|
@ -104,7 +107,8 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
|
|||
|
||||
#if 0
|
||||
/* page_count() not defined everywhere */
|
||||
DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
|
||||
DRM_DEBUG
|
||||
("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
|
||||
baddr, __va(agpmem->memory->memory[offset]), offset,
|
||||
page_count(page));
|
||||
#endif
|
||||
|
@ -140,8 +144,10 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
|
|||
unsigned long i;
|
||||
struct page *page;
|
||||
|
||||
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!map) return NOPAGE_OOM; /* Nothing allocated */
|
||||
if (address > vma->vm_end)
|
||||
return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!map)
|
||||
return NOPAGE_OOM; /* Nothing allocated */
|
||||
|
||||
offset = address - vma->vm_start;
|
||||
i = (unsigned long)map->handle + offset;
|
||||
|
@ -154,7 +160,6 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
|
|||
return page;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* \c close method for shared virtual memory.
|
||||
*
|
||||
|
@ -182,7 +187,8 @@ void drm_vm_shm_close(struct vm_area_struct *vma)
|
|||
down(&dev->struct_sem);
|
||||
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
|
||||
next = pt->next;
|
||||
if (pt->vma->vm_private_data == map) found_maps++;
|
||||
if (pt->vma->vm_private_data == map)
|
||||
found_maps++;
|
||||
if (pt->vma == vma) {
|
||||
if (prev) {
|
||||
prev->next = pt->next;
|
||||
|
@ -195,8 +201,7 @@ void drm_vm_shm_close(struct vm_area_struct *vma)
|
|||
}
|
||||
}
|
||||
/* We were the only map that was found */
|
||||
if(found_maps == 1 &&
|
||||
map->flags & _DRM_REMOVABLE) {
|
||||
if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
|
||||
/* Check to see if we are in the maplist, if we are not, then
|
||||
* we delete this mappings information.
|
||||
*/
|
||||
|
@ -204,7 +209,8 @@ void drm_vm_shm_close(struct vm_area_struct *vma)
|
|||
list = &dev->maplist->head;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = list_entry(list, drm_map_list_t, head);
|
||||
if (r_list->map == map) found_maps++;
|
||||
if (r_list->map == map)
|
||||
found_maps++;
|
||||
}
|
||||
|
||||
if (!found_maps) {
|
||||
|
@ -252,14 +258,16 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
|
|||
unsigned long page_nr;
|
||||
struct page *page;
|
||||
|
||||
if (!dma) return NOPAGE_SIGBUS; /* Error */
|
||||
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!dma->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
|
||||
if (!dma)
|
||||
return NOPAGE_SIGBUS; /* Error */
|
||||
if (address > vma->vm_end)
|
||||
return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!dma->pagelist)
|
||||
return NOPAGE_OOM; /* Nothing allocated */
|
||||
|
||||
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
|
||||
page_nr = offset >> PAGE_SHIFT;
|
||||
page = virt_to_page((dma->pagelist[page_nr] +
|
||||
(offset & (~PAGE_MASK))));
|
||||
page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
|
||||
|
||||
get_page(page);
|
||||
|
||||
|
@ -288,10 +296,12 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
|
|||
unsigned long page_offset;
|
||||
struct page *page;
|
||||
|
||||
if (!entry) return NOPAGE_SIGBUS; /* Error */
|
||||
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!entry->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
|
||||
|
||||
if (!entry)
|
||||
return NOPAGE_SIGBUS; /* Error */
|
||||
if (address > vma->vm_end)
|
||||
return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!entry->pagelist)
|
||||
return NOPAGE_OOM; /* Nothing allocated */
|
||||
|
||||
offset = address - vma->vm_start;
|
||||
map_offset = map->offset - dev->sg->handle;
|
||||
|
@ -302,66 +312,68 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
|
|||
return page;
|
||||
}
|
||||
|
||||
|
||||
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
|
||||
|
||||
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int *type) {
|
||||
if (type) *type = VM_FAULT_MINOR;
|
||||
unsigned long address, int *type)
|
||||
{
|
||||
if (type)
|
||||
*type = VM_FAULT_MINOR;
|
||||
return drm_do_vm_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int *type) {
|
||||
if (type) *type = VM_FAULT_MINOR;
|
||||
unsigned long address, int *type)
|
||||
{
|
||||
if (type)
|
||||
*type = VM_FAULT_MINOR;
|
||||
return drm_do_vm_shm_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int *type) {
|
||||
if (type) *type = VM_FAULT_MINOR;
|
||||
unsigned long address, int *type)
|
||||
{
|
||||
if (type)
|
||||
*type = VM_FAULT_MINOR;
|
||||
return drm_do_vm_dma_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int *type) {
|
||||
if (type) *type = VM_FAULT_MINOR;
|
||||
unsigned long address, int *type)
|
||||
{
|
||||
if (type)
|
||||
*type = VM_FAULT_MINOR;
|
||||
return drm_do_vm_sg_nopage(vma, address);
|
||||
}
|
||||
|
||||
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
|
||||
|
||||
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int unused) {
|
||||
unsigned long address, int unused)
|
||||
{
|
||||
return drm_do_vm_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int unused) {
|
||||
unsigned long address, int unused)
|
||||
{
|
||||
return drm_do_vm_shm_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int unused) {
|
||||
unsigned long address, int unused)
|
||||
{
|
||||
return drm_do_vm_dma_nopage(vma, address);
|
||||
}
|
||||
|
||||
static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int unused) {
|
||||
unsigned long address, int unused)
|
||||
{
|
||||
return drm_do_vm_sg_nopage(vma, address);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/** AGP virtual memory operations */
|
||||
static struct vm_operations_struct drm_vm_ops = {
|
||||
.nopage = drm_vm_nopage,
|
||||
|
@ -390,7 +402,6 @@ static struct vm_operations_struct drm_vm_sg_ops = {
|
|||
.close = drm_vm_close,
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* \c open method for shared virtual memory.
|
||||
*
|
||||
|
@ -537,7 +548,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
|
||||
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
|
||||
|
||||
if ( !priv->authenticated ) return -EACCES;
|
||||
if (!priv->authenticated)
|
||||
return -EACCES;
|
||||
|
||||
/* We check for "dma". On Apple's UniNorth, it's valid to have
|
||||
* the AGP mapped at physical address 0
|
||||
|
@ -545,7 +557,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
*/
|
||||
if (!VM_OFFSET(vma)
|
||||
#if __OS_HAS_AGP
|
||||
&& (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
|
||||
&& (!dev->agp
|
||||
|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
|
||||
#endif
|
||||
)
|
||||
return drm_mmap_dma(filp, vma);
|
||||
|
@ -562,16 +575,19 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
|
||||
r_list = list_entry(list, drm_map_list_t, head);
|
||||
map = r_list->map;
|
||||
if (!map) continue;
|
||||
if (!map)
|
||||
continue;
|
||||
off = dev->fn_tbl->get_map_ofs(map);
|
||||
if (off == VM_OFFSET(vma)) break;
|
||||
if (off == VM_OFFSET(vma))
|
||||
break;
|
||||
}
|
||||
|
||||
if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
|
||||
return -EPERM;
|
||||
|
||||
/* Check for valid size. */
|
||||
if (map->size < vma->vm_end - vma->vm_start) return -EINVAL;
|
||||
if (map->size < vma->vm_end - vma->vm_start)
|
||||
return -EINVAL;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
|
||||
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
|
||||
|
@ -581,8 +597,10 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
/* Ye gads this is ugly. With more thought
|
||||
we could move this up higher and use
|
||||
`protection_map' instead. */
|
||||
vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
|
||||
__pte(pgprot_val(vma->vm_page_prot)))));
|
||||
vma->vm_page_prot =
|
||||
__pgprot(pte_val
|
||||
(pte_wrprotect
|
||||
(__pte(pgprot_val(vma->vm_page_prot)))));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -610,13 +628,15 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
|
||||
}
|
||||
#elif defined(__powerpc__)
|
||||
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
|
||||
pgprot_val(vma->vm_page_prot) |=
|
||||
_PAGE_NO_CACHE | _PAGE_GUARDED;
|
||||
#endif
|
||||
vma->vm_flags |= VM_IO; /* not in core dump */
|
||||
}
|
||||
#if defined(__ia64__)
|
||||
if (map->type != _DRM_AGP)
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
vma->vm_page_prot =
|
||||
pgprot_writecombine(vma->vm_page_prot);
|
||||
#endif
|
||||
offset = dev->fn_tbl->get_reg_ofs(dev);
|
||||
#ifdef __sparc__
|
|
@ -10,13 +10,10 @@
|
|||
#include <linux/sched.h>
|
||||
#include <asm/upa.h>
|
||||
|
||||
#include "ffb.h"
|
||||
#include "drmP.h"
|
||||
|
||||
#include "ffb_drv.h"
|
||||
|
||||
static int DRM(alloc_queue)(drm_device_t *dev, int is_2d_only)
|
||||
{
|
||||
static int DRM(alloc_queue) (drm_device_t * dev, int is_2d_only) {
|
||||
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
|
||||
int i;
|
||||
|
||||
|
@ -354,8 +351,7 @@ static void FFBWait(ffb_fbcPtr ffb)
|
|||
} while (--limit);
|
||||
}
|
||||
|
||||
int DRM(context_switch)(drm_device_t *dev, int old, int new)
|
||||
{
|
||||
int DRM(context_switch) (drm_device_t * dev, int old, int new) {
|
||||
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
|
||||
|
||||
#if DRM_DMA_HISTOGRAM
|
||||
|
@ -364,8 +360,7 @@ int DRM(context_switch)(drm_device_t *dev, int old, int new)
|
|||
|
||||
DRM_DEBUG("Context switch from %d to %d\n", old, new);
|
||||
|
||||
if (new == dev->last_context ||
|
||||
dev->last_context == 0) {
|
||||
if (new == dev->last_context || dev->last_context == 0) {
|
||||
dev->last_context = new;
|
||||
return 0;
|
||||
}
|
||||
|
@ -381,8 +376,7 @@ int DRM(context_switch)(drm_device_t *dev, int old, int new)
}

int DRM(resctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
{
unsigned long arg) {
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;

@ -394,9 +388,7 @@ int DRM(resctx)(struct inode *inode, struct file *filp, unsigned int cmd,
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i)))
if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
return -EFAULT;
}
}

@ -406,10 +398,8 @@ int DRM(resctx)(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}

int DRM(addctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
{
unsigned long arg) {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;

@ -429,8 +419,7 @@ int DRM(addctx)(struct inode *inode, struct file *filp, unsigned int cmd,
}

int DRM(modctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
{
unsigned long arg) {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;

@ -458,8 +447,7 @@ int DRM(modctx)(struct inode *inode, struct file *filp, unsigned int cmd,
}

int DRM(getctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
{
unsigned long arg) {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;

@ -490,8 +478,7 @@ int DRM(getctx)(struct inode *inode, struct file *filp, unsigned int cmd,
}

int DRM(switchctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
{
unsigned long arg) {
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;

@ -503,8 +490,7 @@ int DRM(switchctx)(struct inode *inode, struct file *filp, unsigned int cmd,
}

int DRM(newctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
{
unsigned long arg) {
drm_ctx_t ctx;

if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))

@ -515,8 +501,7 @@ int DRM(newctx)(struct inode *inode, struct file *filp, unsigned int cmd,
}

int DRM(rmctx) (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
{
unsigned long arg) {
drm_ctx_t ctx;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
@ -546,8 +531,7 @@ static void ffb_driver_release(drm_device_t *dev)

idx = context - 1;
if (fpriv &&
context != DRM_KERNEL_CONTEXT &&
fpriv->hw_state[idx] != NULL) {
context != DRM_KERNEL_CONTEXT && fpriv->hw_state[idx] != NULL) {
kfree(fpriv->hw_state[idx]);
fpriv->hw_state[idx] = NULL;
}
@ -557,17 +541,20 @@ static int ffb_driver_presetup(drm_device_t *dev)
{
int ret;
ret = ffb_presetup(dev);
if (ret != 0) return ret;
if (ret != 0)
return ret;
}

static void ffb_driver_pretakedown(drm_device_t * dev)
{
if (dev->dev_private) kfree(dev->dev_private);
if (dev->dev_private)
kfree(dev->dev_private);
}

static void ffb_driver_postcleanup(drm_device_t * dev)
{
if (ffb_position != NULL) kfree(ffb_position);
if (ffb_position != NULL)
kfree(ffb_position);
}

static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev)
@ -5,17 +5,15 @@
*/

#include <linux/config.h>
#include "ffb.h"
#include "drmP.h"

#include "ffb_drv.h"

#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/shmparam.h>
#include <asm/oplib.h>
#include <asm/upa.h>

#include "drmP.h"
#include "ffb_drv.h"

#define DRIVER_AUTHOR "David S. Miller"

#define DRIVER_NAME "ffb"
@ -30,13 +30,14 @@
*
*/

#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>
#include <linux/pagemap.h>

#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"
#include "i810_drv.h"
#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>
#include <linux/pagemap.h>

#ifdef DO_MUNMAP_4_ARGS
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
@ -131,8 +132,8 @@ int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)

if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
VM_OFFSET(vma),
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) return -EAGAIN;
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
@ -151,8 +152,7 @@ static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
|
|||
dev_priv->mmap_buffer = buf;
|
||||
buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
buf->bus_address);
|
||||
MAP_SHARED, buf->bus_address);
|
||||
dev_priv->mmap_buffer = NULL;
|
||||
|
||||
if ((unsigned long)buf_priv->virtual > -1024UL) {
|
||||
|
@ -251,7 +251,8 @@ int i810_dma_cleanup(drm_device_t *dev)
|
|||
drm_buf_t *buf = dma->buflist[i];
|
||||
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
|
||||
if (buf_priv->kernel_virtual && buf->total)
|
||||
drm_ioremapfree(buf_priv->kernel_virtual, buf->total, dev);
|
||||
drm_ioremapfree(buf_priv->kernel_virtual,
|
||||
buf->total, dev);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -269,7 +270,8 @@ static int i810_wait_ring(drm_device_t *dev, int n)
|
|||
while (ring->space < n) {
|
||||
ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
|
||||
ring->space = ring->head - (ring->tail + 8);
|
||||
if (ring->space < 0) ring->space += ring->Size;
|
||||
if (ring->space < 0)
|
||||
ring->space += ring->Size;
|
||||
|
||||
if (ring->head != last_head) {
|
||||
end = jiffies + (HZ * 3);
|
||||
|
@ -297,7 +299,8 @@ static void i810_kernel_lost_context(drm_device_t *dev)
|
|||
ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
|
||||
ring->tail = I810_READ(LP_RING + RING_TAIL);
|
||||
ring->space = ring->head - (ring->tail + 8);
|
||||
if (ring->space < 0) ring->space += ring->Size;
|
||||
if (ring->space < 0)
|
||||
ring->space += ring->Size;
|
||||
}
|
||||
|
||||
static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
|
||||
|
@ -367,8 +370,7 @@ static int i810_dma_initialize(drm_device_t *dev,
|
|||
}
|
||||
|
||||
dev_priv->sarea_priv = (drm_i810_sarea_t *)
|
||||
((u8 *)dev_priv->sarea_map->handle +
|
||||
init->sarea_priv_offset);
|
||||
((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
|
||||
|
||||
dev_priv->ring.Start = init->ring_start;
|
||||
dev_priv->ring.End = init->ring_end;
|
||||
|
@ -484,7 +486,8 @@ int i810_dma_init(struct inode *inode, struct file *filp,
|
|||
int retcode = 0;
|
||||
|
||||
/* Get only the init func */
|
||||
if (copy_from_user(&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
|
||||
if (copy_from_user
|
||||
(&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
|
||||
return -EFAULT;
|
||||
|
||||
switch (init.func) {
|
||||
|
@ -528,8 +531,6 @@ int i810_dma_init(struct inode *inode, struct file *filp,
|
|||
return retcode;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Most efficient way to verify state for the i810 is as it is
|
||||
* emitted. Non-conformant state is silently dropped.
|
||||
*
|
||||
|
@ -556,12 +557,11 @@ static void i810EmitContextVerified( drm_device_t *dev,
|
|||
tmp = code[i];
|
||||
|
||||
if ((tmp & (7 << 29)) == (3 << 29) &&
|
||||
(tmp & (0x1f<<24)) < (0x1d<<24))
|
||||
{
|
||||
(tmp & (0x1f << 24)) < (0x1d << 24)) {
|
||||
OUT_RING(tmp);
|
||||
j++;
|
||||
}
|
||||
else printk("constext state dropped!!!\n");
|
||||
} else
|
||||
printk("constext state dropped!!!\n");
|
||||
}
|
||||
|
||||
if (j & 1)
|
||||
|
@ -570,8 +570,7 @@ static void i810EmitContextVerified( drm_device_t *dev,
|
|||
ADVANCE_LP_RING();
|
||||
}
|
||||
|
||||
static void i810EmitTexVerified( drm_device_t *dev,
|
||||
volatile unsigned int *code )
|
||||
static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code)
|
||||
{
|
||||
drm_i810_private_t *dev_priv = dev->dev_private;
|
||||
int i, j = 0;
|
||||
|
@ -589,12 +588,11 @@ static void i810EmitTexVerified( drm_device_t *dev,
|
|||
tmp = code[i];
|
||||
|
||||
if ((tmp & (7 << 29)) == (3 << 29) &&
|
||||
(tmp & (0x1f<<24)) < (0x1d<<24))
|
||||
{
|
||||
(tmp & (0x1f << 24)) < (0x1d << 24)) {
|
||||
OUT_RING(tmp);
|
||||
j++;
|
||||
}
|
||||
else printk("texture state dropped!!!\n");
|
||||
} else
|
||||
printk("texture state dropped!!!\n");
|
||||
}
|
||||
|
||||
if (j & 1)
|
||||
|
@ -603,7 +601,6 @@ static void i810EmitTexVerified( drm_device_t *dev,
|
|||
ADVANCE_LP_RING();
|
||||
}
|
||||
|
||||
|
||||
/* Need to do some additional checking when setting the dest buffer.
|
||||
*/
|
||||
static void i810EmitDestVerified(drm_device_t * dev,
|
||||
|
@ -641,8 +638,6 @@ static void i810EmitDestVerified( drm_device_t *dev,
|
|||
ADVANCE_LP_RING();
|
||||
}
|
||||
|
||||
|
||||
|
||||
static void i810EmitState(drm_device_t * dev)
|
||||
{
|
||||
drm_i810_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -672,8 +667,6 @@ static void i810EmitState( drm_device_t *dev )
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* need to verify
|
||||
*/
|
||||
static void i810_dma_dispatch_clear(drm_device_t * dev, int flags,
|
||||
|
@ -693,8 +686,10 @@ static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
|
|||
unsigned int tmp = flags;
|
||||
|
||||
flags &= ~(I810_FRONT | I810_BACK);
|
||||
if (tmp & I810_FRONT) flags |= I810_BACK;
|
||||
if (tmp & I810_BACK) flags |= I810_FRONT;
|
||||
if (tmp & I810_FRONT)
|
||||
flags |= I810_BACK;
|
||||
if (tmp & I810_BACK)
|
||||
flags |= I810_FRONT;
|
||||
}
|
||||
|
||||
i810_kernel_lost_context(dev);
|
||||
|
@ -711,14 +706,12 @@ static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
|
|||
|
||||
if (pbox->x1 > pbox->x2 ||
|
||||
pbox->y1 > pbox->y2 ||
|
||||
pbox->x2 > dev_priv->w ||
|
||||
pbox->y2 > dev_priv->h)
|
||||
pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
|
||||
continue;
|
||||
|
||||
if (flags & I810_FRONT) {
|
||||
BEGIN_LP_RING(6);
|
||||
OUT_RING( BR00_BITBLT_CLIENT |
|
||||
BR00_OP_COLOR_BLT | 0x3 );
|
||||
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
|
||||
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
|
||||
OUT_RING((height << 16) | width);
|
||||
OUT_RING(start);
|
||||
|
@ -729,8 +722,7 @@ static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
|
|||
|
||||
if (flags & I810_BACK) {
|
||||
BEGIN_LP_RING(6);
|
||||
OUT_RING( BR00_BITBLT_CLIENT |
|
||||
BR00_OP_COLOR_BLT | 0x3 );
|
||||
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
|
||||
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
|
||||
OUT_RING((height << 16) | width);
|
||||
OUT_RING(dev_priv->back_offset + start);
|
||||
|
@ -741,8 +733,7 @@ static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
|
|||
|
||||
if (flags & I810_DEPTH) {
|
||||
BEGIN_LP_RING(6);
|
||||
OUT_RING( BR00_BITBLT_CLIENT |
|
||||
BR00_OP_COLOR_BLT | 0x3 );
|
||||
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
|
||||
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
|
||||
OUT_RING((height << 16) | width);
|
||||
OUT_RING(dev_priv->depth_offset + start);
|
||||
|
@ -771,8 +762,7 @@ static void i810_dma_dispatch_swap( drm_device_t *dev )
|
|||
if (nbox > I810_NR_SAREA_CLIPRECTS)
|
||||
nbox = I810_NR_SAREA_CLIPRECTS;
|
||||
|
||||
for (i = 0 ; i < nbox; i++, pbox++)
|
||||
{
|
||||
for (i = 0; i < nbox; i++, pbox++) {
|
||||
unsigned int w = pbox->x2 - pbox->x1;
|
||||
unsigned int h = pbox->y2 - pbox->y1;
|
||||
unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
|
||||
|
@ -780,8 +770,7 @@ static void i810_dma_dispatch_swap( drm_device_t *dev )
|
|||
|
||||
if (pbox->x1 > pbox->x2 ||
|
||||
pbox->y1 > pbox->y2 ||
|
||||
pbox->x2 > dev_priv->w ||
|
||||
pbox->y2 > dev_priv->h)
|
||||
pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
|
||||
continue;
|
||||
|
||||
BEGIN_LP_RING(6);
|
||||
|
@ -801,11 +790,8 @@ static void i810_dma_dispatch_swap( drm_device_t *dev )
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
static void i810_dma_dispatch_vertex(drm_device_t * dev,
|
||||
drm_buf_t *buf,
|
||||
int discard,
|
||||
int used)
|
||||
drm_buf_t * buf, int discard, int used)
|
||||
{
|
||||
drm_i810_private_t *dev_priv = dev->dev_private;
|
||||
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
|
||||
|
@ -831,7 +817,8 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
|
|||
if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
|
||||
unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
|
||||
|
||||
*(u32 *)buf_priv->kernel_virtual = ((GFX_OP_PRIMITIVE | prim | ((used/4)-2)));
|
||||
*(u32 *) buf_priv->kernel_virtual =
|
||||
((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
|
||||
|
||||
if (used & 4) {
|
||||
*(u32 *) ((u32) buf_priv->kernel_virtual + used) = 0;
|
||||
|
@ -849,7 +836,8 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
|
|||
SC_ENABLE);
|
||||
OUT_RING(GFX_OP_SCISSOR_INFO);
|
||||
OUT_RING(box[i].x1 | (box[i].y1 << 16));
|
||||
OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
|
||||
OUT_RING((box[i].x2 -
|
||||
1) | ((box[i].y2 - 1) << 16));
|
||||
ADVANCE_LP_RING();
|
||||
}
|
||||
|
||||
|
@ -990,9 +978,12 @@ void i810_reclaim_buffers(struct file *filp)
|
|||
drm_device_dma_t *dma = dev->dma;
|
||||
int i;
|
||||
|
||||
if (!dma) return;
|
||||
if (!dev->dev_private) return;
|
||||
if (!dma->buflist) return;
|
||||
if (!dma)
|
||||
return;
|
||||
if (!dev->dev_private)
|
||||
return;
|
||||
if (!dma->buflist)
|
||||
return;
|
||||
|
||||
i810_flush_queue(dev);
|
||||
|
||||
|
@ -1027,7 +1018,6 @@ int i810_flush_ioctl(struct inode *inode, struct file *filp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int i810_dma_vertex(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
|
@ -1040,7 +1030,8 @@ int i810_dma_vertex(struct inode *inode, struct file *filp,
|
|||
dev_priv->sarea_priv;
|
||||
drm_i810_vertex_t vertex;
|
||||
|
||||
if (copy_from_user(&vertex, (drm_i810_vertex_t __user *)arg, sizeof(vertex)))
|
||||
if (copy_from_user
|
||||
(&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
||||
|
@ -1066,8 +1057,6 @@ int i810_dma_vertex(struct inode *inode, struct file *filp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int i810_clear_bufs(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
|
@ -1075,7 +1064,8 @@ int i810_clear_bufs(struct inode *inode, struct file *filp,
|
|||
drm_device_t *dev = priv->dev;
|
||||
drm_i810_clear_t clear;
|
||||
|
||||
if (copy_from_user(&clear, (drm_i810_clear_t __user *)arg, sizeof(clear)))
|
||||
if (copy_from_user
|
||||
(&clear, (drm_i810_clear_t __user *) arg, sizeof(clear)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
||||
|
@ -1089,8 +1079,7 @@ int i810_clear_bufs(struct inode *inode, struct file *filp,
|
|||
}
|
||||
|
||||
i810_dma_dispatch_clear(dev, clear.flags,
|
||||
clear.clear_color,
|
||||
clear.clear_depth );
|
||||
clear.clear_color, clear.clear_depth);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1160,9 +1149,7 @@ int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
}
|
||||
|
||||
int i810_copybuf(struct inode *inode,
|
||||
struct file *filp,
|
||||
unsigned int cmd,
|
||||
unsigned long arg)
|
||||
struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
/* Never copy - 2.4.x doesn't need it */
|
||||
return 0;
|
||||
|
@ -1188,8 +1175,7 @@ static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
|
|||
|
||||
i810_kernel_lost_context(dev);
|
||||
|
||||
u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
|
||||
I810_BUF_HARDWARE);
|
||||
u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
|
||||
if (u != I810_BUF_CLIENT) {
|
||||
DRM_DEBUG("MC found buffer that isn't mine!\n");
|
||||
}
|
||||
|
@ -1199,8 +1185,7 @@ static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
|
|||
|
||||
sarea_priv->dirty = 0x7f;
|
||||
|
||||
DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
|
||||
address, used);
|
||||
DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used);
|
||||
|
||||
dev_priv->counter++;
|
||||
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
|
||||
|
@ -1224,7 +1209,6 @@ static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
|
|||
OUT_RING(0);
|
||||
ADVANCE_LP_RING();
|
||||
|
||||
|
||||
BEGIN_LP_RING(8);
|
||||
OUT_RING(CMD_STORE_DWORD_IDX);
|
||||
OUT_RING(buf_priv->my_use_idx);
|
||||
|
@ -1292,7 +1276,8 @@ int i810_ov0_info(struct inode *inode, struct file *filp,
|
|||
|
||||
data.offset = dev_priv->overlay_offset;
|
||||
data.physical = dev_priv->overlay_physical;
|
||||
if (copy_to_user((drm_i810_overlay_t __user *)arg,&data,sizeof(data)))
|
||||
if (copy_to_user
|
||||
((drm_i810_overlay_t __user *) arg, &data, sizeof(data)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
@ -1322,14 +1307,12 @@ int i810_ov0_flip(struct inode *inode, struct file *filp,
|
|||
DRM_ERROR("i810_ov0_flip called without lock held\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
//Tell the overlay to update
|
||||
I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Not sure why this isn't set all the time:
|
||||
*/
|
||||
static void i810_do_init_pageflip(drm_device_t * dev)
|
||||
@ -27,7 +27,6 @@
|
|||
#define I810_UPLOAD_TEX1 0x20
|
||||
#define I810_UPLOAD_CLIPRECTS 0x40
|
||||
|
||||
|
||||
/* Indices into buf.Setup where various bits of state are mirrored per
|
||||
* context and per buffer. These can be fired at the card as a unit,
|
||||
* or in a piecewise fashion as required.
|
||||
|
@ -269,7 +268,6 @@ typedef struct _drm_i810_copy_t {
|
|||
#define PR_RECTS (0x7<<18)
|
||||
#define PR_MASK (0x7<<18)
|
||||
|
||||
|
||||
typedef struct drm_i810_dma {
|
||||
void *virtual;
|
||||
int request_idx;
|
||||
|
@ -290,5 +288,4 @@ typedef struct _drm_i810_mc {
|
|||
unsigned int last_render; /* Last Render Request */
|
||||
} drm_i810_mc_t;
|
||||
|
||||
|
||||
#endif /* _I810_DRM_H_ */
|
||||
@ -52,9 +52,7 @@ static int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
@ -95,7 +93,9 @@ static drm_ioctl_desc_t ioctls[] = {
|
|||
};
|
||||
|
||||
static struct drm_driver_fn driver_fn = {
|
||||
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
|
||||
DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
|
||||
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
|
||||
.pretakedown = i810_driver_pretakedown,
|
||||
.release = i810_driver_release,
|
||||
|
@ -114,7 +114,8 @@ static struct drm_driver_fn driver_fn = {
|
|||
.ioctl = drm_ioctl,
|
||||
.mmap = i810_mmap_buffers,
|
||||
.fasync = drm_fasync,
|
||||
},
|
||||
}
|
||||
,
|
||||
};
|
||||
|
||||
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
@ -88,7 +88,6 @@ typedef struct drm_i810_private {
|
|||
|
||||
drm_buf_t *mmap_buffer;
|
||||
|
||||
|
||||
u32 front_di1, back_di1, zi1;
|
||||
|
||||
int back_offset;
|
||||
|
@ -147,7 +146,6 @@ extern int i810_ov0_flip(struct inode *inode, struct file *filp,
|
|||
extern int i810_dma_mc(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
|
||||
|
||||
extern void i810_dma_quiescent(drm_device_t * dev);
|
||||
|
||||
extern int i810_dma_vertex(struct inode *inode, struct file *filp,
|
||||
|
@ -214,7 +212,6 @@ extern void i810_driver_pretakedown(drm_device_t *dev);
|
|||
#define INST_OP_FLUSH 0x02000000
|
||||
#define INST_FLUSH_MAP_CACHE 0x00000001
|
||||
|
||||
|
||||
#define BB1_START_ADDR_MASK (~0x7)
|
||||
#define BB1_PROTECTED (1<<0)
|
||||
#define BB1_UNPROTECTED (0<<0)
|
||||
@ -31,15 +31,16 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "i830_drm.h"
|
||||
#include "i830_drv.h"
|
||||
#include <linux/interrupt.h> /* For task queue support */
|
||||
#include <linux/pagemap.h> /* For FASTCALL on unlock_page() */
|
||||
#include <linux/delay.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "i830_drm.h"
|
||||
#include "i830_drv.h"
|
||||
|
||||
#ifdef DO_MUNMAP_4_ARGS
|
||||
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
|
||||
#else
|
||||
|
@ -132,8 +133,8 @@ int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
|
|||
|
||||
if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
|
||||
VM_OFFSET(vma),
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot)) return -EAGAIN;
|
||||
vma->vm_end - vma->vm_start, vma->vm_page_prot))
|
||||
return -EAGAIN;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -146,7 +147,8 @@ static int i830_map_buffer(drm_buf_t *buf, struct file *filp)
unsigned long virtual;
int retcode = 0;

if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;
if (buf_priv->currently_mapped == I830_BUF_MAPPED)
return -EINVAL;

down_write(&current->mm->mmap_sem);
dev_priv->mmap_buffer = buf;
@ -225,7 +227,8 @@ int i830_dma_cleanup(drm_device_t *dev)
|
|||
* may not have been called from userspace and after dev_private
|
||||
* is freed, it's too late.
|
||||
*/
|
||||
if (dev->irq_enabled) drm_irq_uninstall(dev);
|
||||
if (dev->irq_enabled)
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
if (dev->dev_private) {
|
||||
int i;
|
||||
|
@ -252,7 +255,8 @@ int i830_dma_cleanup(drm_device_t *dev)
|
|||
drm_buf_t *buf = dma->buflist[i];
|
||||
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
|
||||
if (buf_priv->kernel_virtual && buf->total)
|
||||
drm_ioremapfree(buf_priv->kernel_virtual, buf->total, dev);
|
||||
drm_ioremapfree(buf_priv->kernel_virtual,
|
||||
buf->total, dev);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
@ -270,7 +274,8 @@ int i830_wait_ring(drm_device_t *dev, int n, const char *caller)
|
|||
while (ring->space < n) {
|
||||
ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
|
||||
ring->space = ring->head - (ring->tail + 8);
|
||||
if (ring->space < 0) ring->space += ring->Size;
|
||||
if (ring->space < 0)
|
||||
ring->space += ring->Size;
|
||||
|
||||
if (ring->head != last_head) {
|
||||
end = jiffies + (HZ * 3);
|
||||
|
@ -299,7 +304,8 @@ static void i830_kernel_lost_context(drm_device_t *dev)
|
|||
ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
|
||||
ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
|
||||
ring->space = ring->head - (ring->tail + 8);
|
||||
if (ring->space < 0) ring->space += ring->Size;
|
||||
if (ring->space < 0)
|
||||
ring->space += ring->Size;
|
||||
|
||||
if (ring->head == ring->tail)
|
||||
dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
|
||||
|
@ -373,8 +379,7 @@ static int i830_dma_initialize(drm_device_t *dev,
|
|||
}
|
||||
|
||||
dev_priv->sarea_priv = (drm_i830_sarea_t *)
|
||||
((u8 *)dev_priv->sarea_map->handle +
|
||||
init->sarea_priv_offset);
|
||||
((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
|
||||
|
||||
dev_priv->ring.Start = init->ring_start;
|
||||
dev_priv->ring.End = init->ring_end;
|
||||
|
@ -465,7 +470,8 @@ int i830_dma_init(struct inode *inode, struct file *filp,
|
|||
case I830_INIT_DMA:
|
||||
dev_priv = drm_alloc(sizeof(drm_i830_private_t),
|
||||
DRM_MEM_DRIVER);
|
||||
if(dev_priv == NULL) return -ENOMEM;
|
||||
if (dev_priv == NULL)
|
||||
return -ENOMEM;
|
||||
retcode = i830_dma_initialize(dev, dev_priv, &init);
|
||||
break;
|
||||
case I830_CLEANUP_DMA:
|
||||
|
@ -486,8 +492,7 @@ int i830_dma_init(struct inode *inode, struct file *filp,
|
|||
/* Most efficient way to verify state for the i830 is as it is
|
||||
* emitted. Non-conformant state is silently dropped.
|
||||
*/
|
||||
static void i830EmitContextVerified( drm_device_t *dev,
|
||||
unsigned int *code )
|
||||
static void i830EmitContextVerified(drm_device_t * dev, unsigned int *code)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
int i, j = 0;
|
||||
|
@ -562,14 +567,12 @@ static void i830EmitTexVerified( drm_device_t *dev, unsigned int *code )
|
|||
OUT_RING(0);
|
||||
|
||||
ADVANCE_LP_RING();
|
||||
}
|
||||
else
|
||||
} else
|
||||
printk("rejected packet %x\n", code[0]);
|
||||
}
|
||||
|
||||
static void i830EmitTexBlendVerified(drm_device_t * dev,
|
||||
unsigned int *code,
|
||||
unsigned int num)
|
||||
unsigned int *code, unsigned int num)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
int i, j = 0;
|
||||
|
@ -594,9 +597,7 @@ static void i830EmitTexBlendVerified( drm_device_t *dev,
|
|||
}
|
||||
|
||||
static void i830EmitTexPalette(drm_device_t * dev,
|
||||
unsigned int *palette,
|
||||
int number,
|
||||
int is_shared )
|
||||
unsigned int *palette, int number, int is_shared)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
|
@ -608,8 +609,7 @@ static void i830EmitTexPalette( drm_device_t *dev,
|
|||
|
||||
if (is_shared == 1) {
|
||||
OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
|
||||
MAP_PALETTE_NUM(0) |
|
||||
MAP_PALETTE_BOTH);
|
||||
MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
|
||||
} else {
|
||||
OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
|
||||
}
|
||||
|
@ -623,8 +623,7 @@ static void i830EmitTexPalette( drm_device_t *dev,
|
|||
|
||||
/* Need to do some additional checking when setting the dest buffer.
|
||||
*/
|
||||
static void i830EmitDestVerified( drm_device_t *dev,
|
||||
unsigned int *code )
|
||||
static void i830EmitDestVerified(drm_device_t * dev, unsigned int *code)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
unsigned int tmp;
|
||||
|
@ -632,7 +631,6 @@ static void i830EmitDestVerified( drm_device_t *dev,
|
|||
|
||||
BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);
|
||||
|
||||
|
||||
tmp = code[I830_DESTREG_CBUFADDR];
|
||||
if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
|
||||
if (((int)outring) & 8) {
|
||||
|
@ -660,7 +658,6 @@ static void i830EmitDestVerified( drm_device_t *dev,
|
|||
/* invarient:
|
||||
*/
|
||||
|
||||
|
||||
OUT_RING(GFX_OP_DESTBUFFER_VARS);
|
||||
OUT_RING(code[I830_DESTREG_DV1]);
|
||||
|
||||
|
@ -687,8 +684,7 @@ static void i830EmitDestVerified( drm_device_t *dev,
|
|||
ADVANCE_LP_RING();
|
||||
}
|
||||
|
||||
static void i830EmitStippleVerified( drm_device_t *dev,
|
||||
unsigned int *code )
|
||||
static void i830EmitStippleVerified(drm_device_t * dev, unsigned int *code)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
RING_LOCALS;
|
||||
|
@ -699,7 +695,6 @@ static void i830EmitStippleVerified( drm_device_t *dev,
|
|||
ADVANCE_LP_RING();
|
||||
}
|
||||
|
||||
|
||||
static void i830EmitState(drm_device_t * dev)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -769,8 +764,7 @@ static void i830EmitState( drm_device_t *dev )
|
|||
/* 1.3:
|
||||
*/
|
||||
if (dirty & I830_UPLOAD_STIPPLE) {
|
||||
i830EmitStippleVerified( dev,
|
||||
sarea_priv->StippleState);
|
||||
i830EmitStippleVerified(dev, sarea_priv->StippleState);
|
||||
sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
|
||||
}
|
||||
|
||||
|
@ -784,10 +778,8 @@ static void i830EmitState( drm_device_t *dev )
|
|||
sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
|
||||
}
|
||||
|
||||
|
||||
if (dirty & I830_UPLOAD_TEXBLEND2) {
|
||||
i830EmitTexBlendVerified(
|
||||
dev,
|
||||
i830EmitTexBlendVerified(dev,
|
||||
sarea_priv->TexBlendState2,
|
||||
sarea_priv->TexBlendStateWordsUsed2);
|
||||
|
||||
|
@ -795,8 +787,7 @@ static void i830EmitState( drm_device_t *dev )
|
|||
}
|
||||
|
||||
if (dirty & I830_UPLOAD_TEXBLEND3) {
|
||||
i830EmitTexBlendVerified(
|
||||
dev,
|
||||
i830EmitTexBlendVerified(dev,
|
||||
sarea_priv->TexBlendState3,
|
||||
sarea_priv->TexBlendStateWordsUsed3);
|
||||
sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
|
||||
|
@ -808,8 +799,7 @@ static void i830EmitState( drm_device_t *dev )
|
|||
*/
|
||||
|
||||
static void i830_fill_box(drm_device_t * dev,
|
||||
int x, int y, int w, int h,
|
||||
int r, int g, int b )
|
||||
int x, int y, int w, int h, int r, int g, int b)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
u32 color;
|
||||
|
@ -827,8 +817,7 @@ static void i830_fill_box( drm_device_t *dev,
|
|||
color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
|
||||
} else {
|
||||
color = (((r & 0xf8) << 8) |
|
||||
((g & 0xfc) << 3) |
|
||||
((b & 0xf8) >> 3));
|
||||
((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
|
||||
}
|
||||
|
||||
BEGIN_LP_RING(6);
|
||||
|
@ -876,14 +865,15 @@ static void i830_cp_performance_boxes( drm_device_t *dev )
|
|||
if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
|
||||
i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
|
||||
|
||||
|
||||
/* Draw bars indicating number of buffers allocated
|
||||
* (not a great measure, easily confused)
|
||||
*/
|
||||
if (dev_priv->dma_used) {
|
||||
int bar = dev_priv->dma_used / 10240;
|
||||
if (bar > 100) bar = 100;
|
||||
if (bar < 1) bar = 1;
|
||||
if (bar > 100)
|
||||
bar = 100;
|
||||
if (bar < 1)
|
||||
bar = 1;
|
||||
i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
|
||||
dev_priv->dma_used = 0;
|
||||
}
|
||||
|
@ -906,13 +896,14 @@ static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
|
|||
unsigned int BR13, CMD, D_CMD;
|
||||
RING_LOCALS;
|
||||
|
||||
|
||||
if (dev_priv->current_page == 1) {
|
||||
unsigned int tmp = flags;
|
||||
|
||||
flags &= ~(I830_FRONT | I830_BACK);
|
||||
if ( tmp & I830_FRONT ) flags |= I830_BACK;
|
||||
if ( tmp & I830_BACK ) flags |= I830_FRONT;
|
||||
if (tmp & I830_FRONT)
|
||||
flags |= I830_BACK;
|
||||
if (tmp & I830_BACK)
|
||||
flags |= I830_FRONT;
|
||||
}
|
||||
|
||||
i830_kernel_lost_context(dev);
|
||||
|
@ -944,8 +935,7 @@ static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
|
|||
for (i = 0; i < nbox; i++, pbox++) {
|
||||
if (pbox->x1 > pbox->x2 ||
|
||||
pbox->y1 > pbox->y2 ||
|
||||
pbox->x2 > dev_priv->w ||
|
||||
pbox->y2 > dev_priv->h)
|
||||
pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
|
||||
continue;
|
||||
|
||||
if (flags & I830_FRONT) {
|
||||
|
@ -1021,21 +1011,17 @@ static void i830_dma_dispatch_swap( drm_device_t *dev )
|
|||
break;
|
||||
}
|
||||
|
||||
|
||||
if (nbox > I830_NR_SAREA_CLIPRECTS)
|
||||
nbox = I830_NR_SAREA_CLIPRECTS;
|
||||
|
||||
for (i = 0 ; i < nbox; i++, pbox++)
|
||||
{
|
||||
for (i = 0; i < nbox; i++, pbox++) {
|
||||
if (pbox->x1 > pbox->x2 ||
|
||||
pbox->y1 > pbox->y2 ||
|
||||
pbox->x2 > dev_priv->w ||
|
||||
pbox->y2 > dev_priv->h)
|
||||
pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
|
||||
continue;
|
||||
|
||||
DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
|
||||
pbox->x1, pbox->y1,
|
||||
pbox->x2, pbox->y2);
|
||||
pbox->x1, pbox->y1, pbox->x2, pbox->y2);
|
||||
|
||||
BEGIN_LP_RING(8);
|
||||
OUT_RING(CMD);
|
||||
|
@ -1077,7 +1063,6 @@ static void i830_dma_dispatch_flip( drm_device_t *dev )
|
|||
i830_cp_performance_boxes(dev);
|
||||
}
|
||||
|
||||
|
||||
BEGIN_LP_RING(2);
|
||||
OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
|
||||
OUT_RING(0);
|
||||
|
@ -1096,21 +1081,16 @@ static void i830_dma_dispatch_flip( drm_device_t *dev )
|
|||
OUT_RING(0);
|
||||
ADVANCE_LP_RING();
|
||||
|
||||
|
||||
BEGIN_LP_RING(2);
|
||||
OUT_RING( MI_WAIT_FOR_EVENT |
|
||||
MI_WAIT_FOR_PLANE_A_FLIP );
|
||||
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
|
||||
OUT_RING(0);
|
||||
ADVANCE_LP_RING();
|
||||
|
||||
|
||||
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
|
||||
}
|
||||
|
||||
static void i830_dma_dispatch_vertex(drm_device_t * dev,
|
||||
drm_buf_t *buf,
|
||||
int discard,
|
||||
int used)
|
||||
drm_buf_t * buf, int discard, int used)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
|
||||
|
@ -1155,8 +1135,7 @@ static void i830_dma_dispatch_vertex(drm_device_t *dev,
|
|||
u32 *vp = buf_priv->kernel_virtual;
|
||||
|
||||
vp[0] = (GFX_OP_PRIMITIVE |
|
||||
sarea_priv->vertex_prim |
|
||||
((used/4)-2));
|
||||
sarea_priv->vertex_prim | ((used / 4) - 2));
|
||||
|
||||
if (dev_priv->use_mi_batchbuffer_start) {
|
||||
vp[used / 4] = MI_BATCH_BUFFER_END;
|
||||
|
@ -1176,10 +1155,12 @@ static void i830_dma_dispatch_vertex(drm_device_t *dev,
|
|||
if (i < nbox) {
|
||||
BEGIN_LP_RING(6);
|
||||
OUT_RING(GFX_OP_DRAWRECT_INFO);
|
||||
OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR1] );
|
||||
OUT_RING(sarea_priv->
|
||||
BufferState[I830_DESTREG_DR1]);
|
||||
OUT_RING(box[i].x1 | (box[i].y1 << 16));
|
||||
OUT_RING(box[i].x2 | (box[i].y2 << 16));
|
||||
OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR4] );
|
||||
OUT_RING(sarea_priv->
|
||||
BufferState[I830_DESTREG_DR4]);
|
||||
OUT_RING(0);
|
||||
ADVANCE_LP_RING();
|
||||
}
|
||||
|
@ -1189,8 +1170,7 @@ static void i830_dma_dispatch_vertex(drm_device_t *dev,
|
|||
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
|
||||
OUT_RING(start | MI_BATCH_NON_SECURE);
|
||||
ADVANCE_LP_RING();
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
BEGIN_LP_RING(4);
|
||||
OUT_RING(MI_BATCH_BUFFER);
|
||||
OUT_RING(start | MI_BATCH_NON_SECURE);
|
||||
|
@ -1221,7 +1201,6 @@ static void i830_dma_dispatch_vertex(drm_device_t *dev,
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void i830_dma_quiescent(drm_device_t * dev)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -1279,9 +1258,12 @@ void i830_reclaim_buffers( struct file *filp )
|
|||
drm_device_dma_t *dma = dev->dma;
|
||||
int i;
|
||||
|
||||
if (!dma) return;
|
||||
if (!dev->dev_private) return;
|
||||
if (!dma->buflist) return;
|
||||
if (!dma)
|
||||
return;
|
||||
if (!dev->dev_private)
|
||||
return;
|
||||
if (!dma->buflist)
|
||||
return;
|
||||
|
||||
i830_flush_queue(dev);
|
||||
|
||||
|
@ -1328,7 +1310,8 @@ int i830_dma_vertex(struct inode *inode, struct file *filp,
|
|||
dev_priv->sarea_priv;
|
||||
drm_i830_vertex_t vertex;
|
||||
|
||||
if (copy_from_user(&vertex, (drm_i830_vertex_t __user *)arg, sizeof(vertex)))
|
||||
if (copy_from_user
|
||||
(&vertex, (drm_i830_vertex_t __user *) arg, sizeof(vertex)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
||||
|
@ -1339,7 +1322,8 @@ int i830_dma_vertex(struct inode *inode, struct file *filp,
|
|||
DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
|
||||
vertex.idx, vertex.used, vertex.discard);
|
||||
|
||||
if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
|
||||
if (vertex.idx < 0 || vertex.idx > dma->buf_count)
|
||||
return -EINVAL;
|
||||
|
||||
i830_dma_dispatch_vertex(dev,
|
||||
dma->buflist[vertex.idx],
|
||||
|
@ -1358,7 +1342,8 @@ int i830_clear_bufs(struct inode *inode, struct file *filp,
|
|||
drm_device_t *dev = priv->dev;
|
||||
drm_i830_clear_t clear;
|
||||
|
||||
if (copy_from_user(&clear, (drm_i830_clear_t __user *)arg, sizeof(clear)))
|
||||
if (copy_from_user
|
||||
(&clear, (drm_i830_clear_t __user *) arg, sizeof(clear)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
||||
|
@ -1373,8 +1358,7 @@ int i830_clear_bufs(struct inode *inode, struct file *filp,
|
|||
|
||||
i830_dma_dispatch_clear(dev, clear.flags,
|
||||
clear.clear_color,
|
||||
clear.clear_depth,
|
||||
clear.clear_depthmask);
|
||||
clear.clear_depth, clear.clear_depthmask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1395,8 +1379,6 @@ int i830_swap_bufs(struct inode *inode, struct file *filp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Not sure why this isn't set all the time:
|
||||
*/
|
||||
static void i830_do_init_pageflip(drm_device_t * dev)
|
||||
|
@ -1492,9 +1474,7 @@ int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
}
|
||||
|
||||
int i830_copybuf(struct inode *inode,
|
||||
struct file *filp,
|
||||
unsigned int cmd,
|
||||
unsigned long arg)
|
||||
struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
/* Never copy - 2.4.x doesn't need it */
|
||||
return 0;
|
||||
|
@ -1506,8 +1486,6 @@ int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int i830_getparam(struct inode *inode, struct file *filp, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
|
@ -1522,7 +1500,8 @@ int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
return -EINVAL;
}

if (copy_from_user(&param, (drm_i830_getparam_t __user *)arg, sizeof(param) ))
if (copy_from_user
(&param, (drm_i830_getparam_t __user *) arg, sizeof(param)))
return -EFAULT;

switch (param.param) {
@ -1541,7 +1520,6 @@ int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int i830_setparam(struct inode *inode, struct file *filp, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
|
@ -1555,7 +1533,8 @@ int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
return -EINVAL;
}

if (copy_from_user(&param, (drm_i830_setparam_t __user *)arg, sizeof(param) ))
if (copy_from_user
(&param, (drm_i830_setparam_t __user *) arg, sizeof(param)))
return -EFAULT;

switch (param.param) {
@ -1569,7 +1548,6 @@ int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void i830_driver_pretakedown(drm_device_t * dev)
|
||||
{
|
||||
i830_dma_cleanup(dev);
|
||||
|
@ -1585,4 +1563,3 @@ int i830_driver_dma_quiescent(drm_device_t *dev)
|
|||
i830_dma_quiescent(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -116,7 +116,6 @@
|
|||
#define I830_STPREG_ST1 1
|
||||
#define I830_STP_SETUP_SIZE 2
|
||||
|
||||
|
||||
/* Texture state (per tex unit)
|
||||
*/
|
||||
|
||||
|
@ -247,7 +246,6 @@ typedef struct _drm_i830_sarea {
|
|||
#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
|
||||
#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
|
||||
|
||||
|
||||
/* I830 specific ioctls
|
||||
* The device specific ioctl range is 0x40 to 0x79.
|
||||
*/
|
||||
|
@ -289,8 +287,6 @@ typedef struct _drm_i830_clear {
|
|||
unsigned int clear_depthmask;
|
||||
} drm_i830_clear_t;
|
||||
|
||||
|
||||
|
||||
/* These may be placeholders if we have more cliprects than
|
||||
* I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
|
||||
* false, indicating that the buffer will be dispatched again with a
|
||||
|
@ -315,7 +311,6 @@ typedef struct drm_i830_dma {
|
|||
int granted;
|
||||
} drm_i830_dma_t;
|
||||
|
||||
|
||||
/* 1.3: Userspace can request & wait on irq's:
|
||||
*/
|
||||
typedef struct drm_i830_irq_emit {
|
||||
|
@ -326,7 +321,6 @@ typedef struct drm_i830_irq_wait {
|
|||
int irq_seq;
|
||||
} drm_i830_irq_wait_t;
|
||||
|
||||
|
||||
/* 1.3: New ioctl to query kernel params:
|
||||
*/
|
||||
#define I830_PARAM_IRQ_ACTIVE 1
|
||||
|
@ -336,7 +330,6 @@ typedef struct drm_i830_getparam {
|
|||
int __user *value;
|
||||
} drm_i830_getparam_t;
|
||||
|
||||
|
||||
/* 1.3: New ioctl to set kernel params:
|
||||
*/
|
||||
#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
|
||||
|
@ -346,5 +339,4 @@ typedef struct drm_i830_setparam {
|
|||
int value;
|
||||
} drm_i830_setparam_t;
|
||||
|
||||
|
||||
#endif /* _I830_DRM_H_ */
|
||||
@ -33,6 +33,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "i830_drm.h"
|
||||
|
@ -53,9 +54,7 @@ int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
@ -95,7 +94,9 @@ static drm_ioctl_desc_t ioctls[] = {
|
|||
};
|
||||
|
||||
static struct drm_driver_fn driver_fn = {
|
||||
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
|
||||
DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
|
||||
#if USE_IRQS
|
||||
.driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
|
||||
#endif
|
||||
|
@ -123,7 +124,8 @@ static struct drm_driver_fn driver_fn = {
|
|||
.ioctl = drm_ioctl,
|
||||
.mmap = i830_mmap_buffers,
|
||||
.fasync = drm_fasync,
|
||||
},
|
||||
}
|
||||
,
|
||||
};
|
||||
|
||||
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
@ -184,8 +184,6 @@ extern int i830_driver_dma_quiescent(drm_device_t *dev);
|
|||
#define I830_READ16(reg) I830_DEREF16(reg)
|
||||
#define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0)
|
||||
|
||||
|
||||
|
||||
#define I830_VERBOSE 0
|
||||
|
||||
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
|
||||
|
@ -203,7 +201,6 @@ extern int i830_driver_dma_quiescent(drm_device_t *dev);
|
|||
virt = dev_priv->ring.virtual_start; \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define OUT_RING(n) do { \
|
||||
if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \
|
||||
*(volatile unsigned int *)(virt + outring) = n; \
|
||||
|
@ -221,7 +218,6 @@ extern int i830_driver_dma_quiescent(drm_device_t *dev);
|
|||
|
||||
extern int i830_wait_ring(drm_device_t * dev, int n, const char *caller);
|
||||
|
||||
|
||||
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
|
||||
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
|
||||
#define CMD_REPORT_HEAD (7<<23)
|
||||
|
@ -235,7 +231,6 @@ extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller);
|
|||
#define INST_OP_FLUSH 0x02000000
|
||||
#define INST_FLUSH_MAP_CACHE 0x00000001
|
||||
|
||||
|
||||
#define BB1_START_ADDR_MASK (~0x7)
|
||||
#define BB1_PROTECTED (1<<0)
|
||||
#define BB1_UNPROTECTED (0<<0)
|
||||
|
@ -248,7 +243,6 @@ extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller);
|
|||
|
||||
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
|
||||
|
||||
|
||||
#define LP_RING 0x2030
|
||||
#define HP_RING 0x2040
|
||||
#define RING_TAIL 0x00
|
||||
|
@ -332,4 +326,3 @@ extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller);
|
|||
#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
|
||||
|
||||
#endif
|
||||
|
||||
@ -26,13 +26,13 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h> /* For task queue support */
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "i830_drm.h"
|
||||
#include "i830_drv.h"
|
||||
#include <linux/interrupt.h> /* For task queue support */
|
||||
#include <linux/delay.h>
|
||||
|
||||
|
||||
irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
|
||||
{
|
||||
|
@ -54,7 +54,6 @@ irqreturn_t i830_driver_irq_handler( DRM_IRQ_ARGS )
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
|
||||
int i830_emit_irq(drm_device_t * dev)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -72,11 +71,9 @@ int i830_emit_irq(drm_device_t *dev)
|
|||
return atomic_read(&dev_priv->irq_emitted);
|
||||
}
|
||||
|
||||
|
||||
int i830_wait_irq(drm_device_t * dev, int irq_nr)
|
||||
{
|
||||
drm_i830_private_t *dev_priv =
|
||||
(drm_i830_private_t *)dev->dev_private;
|
||||
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
|
||||
DECLARE_WAITQUEUE(entry, current);
|
||||
unsigned long end = jiffies + HZ * 3;
|
||||
int ret = 0;
|
||||
|
@ -116,7 +113,6 @@ int i830_wait_irq(drm_device_t *dev, int irq_nr)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/* Needs the lock as it touches the ring.
|
||||
*/
|
||||
int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd,
|
||||
|
@ -138,7 +134,8 @@ int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (copy_from_user( &emit, (drm_i830_irq_emit_t __user *)arg, sizeof(emit) ))
|
||||
if (copy_from_user
|
||||
(&emit, (drm_i830_irq_emit_t __user *) arg, sizeof(emit)))
|
||||
return -EFAULT;
|
||||
|
||||
result = i830_emit_irq(dev);
|
||||
|
@ -151,7 +148,6 @@ int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Doesn't need the hardware lock.
|
||||
*/
|
||||
int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd,
|
||||
|
@ -174,12 +170,11 @@ int i830_irq_wait( struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
return i830_wait_irq(dev, irqwait.irq_seq);
|
||||
}
|
||||
|
||||
|
||||
/* drm_dma.h hooks
|
||||
*/
|
||||
void i830_driver_irq_preinstall( drm_device_t *dev ) {
|
||||
drm_i830_private_t *dev_priv =
|
||||
(drm_i830_private_t *)dev->dev_private;
|
||||
void i830_driver_irq_preinstall(drm_device_t * dev)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
|
||||
|
||||
I830_WRITE16(I830REG_HWSTAM, 0xffff);
|
||||
I830_WRITE16(I830REG_INT_MASK_R, 0x0);
|
||||
|
@ -189,16 +184,16 @@ void i830_driver_irq_preinstall( drm_device_t *dev ) {
|
|||
init_waitqueue_head(&dev_priv->irq_queue);
|
||||
}
|
||||
|
||||
void i830_driver_irq_postinstall( drm_device_t *dev ) {
|
||||
drm_i830_private_t *dev_priv =
|
||||
(drm_i830_private_t *)dev->dev_private;
|
||||
void i830_driver_irq_postinstall(drm_device_t * dev)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
|
||||
|
||||
I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
|
||||
}
|
||||
|
||||
void i830_driver_irq_uninstall( drm_device_t *dev ) {
|
||||
drm_i830_private_t *dev_priv =
|
||||
(drm_i830_private_t *)dev->dev_private;
|
||||
void i830_driver_irq_uninstall(drm_device_t * dev)
|
||||
{
|
||||
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
|
||||
if (!dev_priv)
|
||||
return;
|
||||
|
||||
@ -28,9 +28,7 @@ int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
@ -68,7 +66,8 @@ static drm_ioctl_desc_t ioctls[] = {
|
|||
};
|
||||
|
||||
static struct drm_driver_fn driver_fn = {
|
||||
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
|
||||
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
|
||||
.pretakedown = i915_driver_pretakedown,
|
||||
.prerelease = i915_driver_prerelease,
|
||||
@ -42,9 +42,7 @@ static int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
@ -84,7 +82,9 @@ static drm_ioctl_desc_t ioctls[] = {
|
|||
};
|
||||
|
||||
static struct drm_driver_fn driver_fn = {
|
||||
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA
|
||||
| DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.pretakedown = mach64_driver_pretakedown,
|
||||
.vblank_wait = mach64_driver_vblank_wait,
|
||||
.irq_preinstall = mach64_driver_irq_preinstall,
|
||||
@ -49,9 +49,7 @@ static int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
@ -87,7 +85,10 @@ static drm_ioctl_desc_t ioctls[] = {
|
|||
};
|
||||
|
||||
static struct drm_driver_fn driver_fn = {
|
||||
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
|
||||
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
|
||||
DRIVER_IRQ_VBL,
|
||||
.pretakedown = mga_driver_pretakedown,
|
||||
.dma_quiescent = mga_driver_dma_quiescent,
|
||||
.vblank_wait = mga_driver_vblank_wait,
|
||||
@ -45,9 +45,7 @@ static int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
@ -96,7 +94,10 @@ static drm_ioctl_desc_t ioctls[] = {
|
|||
};
|
||||
|
||||
static struct drm_driver_fn driver_fn = {
|
||||
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
|
||||
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
|
||||
DRIVER_IRQ_VBL,
|
||||
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
|
||||
.prerelease = r128_driver_prerelease,
|
||||
.pretakedown = r128_driver_pretakedown,
|
||||
|
@ -120,7 +121,8 @@ static struct drm_driver_fn driver_fn = {
|
|||
.ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.fasync = drm_fasync,
|
||||
},
|
||||
}
|
||||
,
|
||||
};
|
||||
|
||||
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
@ -29,7 +29,6 @@
|
|||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/config.h>
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
|
@ -46,9 +45,7 @@ static int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
@ -132,7 +129,10 @@ static drm_ioctl_desc_t ioctls[] = {
|
|||
};
|
||||
|
||||
static struct drm_driver_fn driver_fn = {
|
||||
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
|
||||
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
|
||||
DRIVER_IRQ_VBL,
|
||||
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
|
||||
.preinit = radeon_preinit,
|
||||
.postinit = radeon_postinit,
|
||||
|
@ -161,7 +161,8 @@ static struct drm_driver_fn driver_fn = {
|
|||
.ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.fasync = drm_fasync,
|
||||
},
|
||||
}
|
||||
,
|
||||
};
|
||||
|
||||
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
|
|
@ -25,5 +25,3 @@ struct radeon_i2c_chan {
|
|||
|
||||
extern int radeon_create_i2c_busses(drm_device_t * dev);
|
||||
extern void radeon_delete_i2c_busses(drm_device_t * dev);
|
||||
|
||||
|
||||
@ -22,15 +22,14 @@
|
|||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
/*=========================================================*/
|
||||
#include <linux/interrupt.h> /* For task queue support */
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "savage_drm.h"
|
||||
#include "savage_drv.h"
|
||||
|
||||
#include <linux/interrupt.h> /* For task queue support */
|
||||
#include <linux/delay.h>
|
||||
|
||||
#define SAVAGE_DEFAULT_USEC_TIMEOUT 10000
|
||||
#define SAVAGE_FREELIST_DEBUG 0
|
||||
|
||||
@ -34,8 +34,7 @@
|
|||
#define DRM_SAVAGE_MEM_LOCATION_AGP 2
|
||||
#define DRM_SAVAGE_DMA_AGP_SIZE (16*1024*1024)
|
||||
|
||||
typedef struct drm_savage_alloc_cont_mem
|
||||
{
|
||||
typedef struct drm_savage_alloc_cont_mem {
|
||||
size_t size; /*size of buffer */
|
||||
unsigned long type; /*4k page or word */
|
||||
unsigned long alignment;
|
||||
|
@ -45,8 +44,7 @@ typedef struct drm_savage_alloc_cont_mem
|
|||
unsigned long linear;
|
||||
} drm_savage_alloc_cont_mem_t;
|
||||
|
||||
typedef struct drm_savage_get_physcis_address
|
||||
{
|
||||
typedef struct drm_savage_get_physcis_address {
|
||||
unsigned long v_address;
|
||||
unsigned long p_address;
|
||||
} drm_savage_get_physcis_address_t;
|
||||
|
@ -108,7 +106,6 @@ typedef struct {
|
|||
unsigned int pitch;
|
||||
} drm_savage_server_regs_t;
|
||||
|
||||
|
||||
typedef struct _drm_savage_sarea {
|
||||
/* The channel for communication of state information to the kernel
|
||||
* on firing a vertex dma buffer.
|
||||
|
@ -149,10 +146,10 @@ typedef struct _drm_savage_sarea {
|
|||
*/
|
||||
unsigned int status[4];
|
||||
|
||||
|
||||
/* LRU lists for texture memory in agp space and on the card.
|
||||
*/
|
||||
drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
|
||||
drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
|
||||
1];
|
||||
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
|
||||
|
||||
/* Mechanism to validate card state.
|
||||
|
@ -164,8 +161,6 @@ typedef struct _drm_savage_sarea {
|
|||
unsigned long agp_offset;
|
||||
} drm_savage_sarea_t, *drm_savage_sarea_ptr;
|
||||
|
||||
|
||||
|
||||
typedef struct drm_savage_init {
|
||||
|
||||
unsigned long sarea_priv_offset;
|
||||
@ -39,9 +39,7 @@ static int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
@ -43,9 +43,7 @@ static int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
@ -125,13 +125,11 @@
|
|||
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
|
||||
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
|
||||
|
||||
|
||||
typedef unsigned long drm_handle_t; /**< To mapped regions */
|
||||
typedef unsigned int drm_context_t; /**< GLXContext handle */
|
||||
typedef unsigned int drm_drawable_t;
|
||||
typedef unsigned int drm_magic_t; /**< Magic for authentication */
|
||||
|
||||
|
||||
/**
|
||||
* Cliprect.
|
||||
*
|
||||
|
@ -148,7 +146,6 @@ typedef struct drm_clip_rect {
|
|||
unsigned short y2;
|
||||
} drm_clip_rect_t;
|
||||
|
||||
|
||||
/**
|
||||
* Texture region,
|
||||
*/
|
||||
|
@ -172,7 +169,6 @@ typedef struct drm_hw_lock {
|
|||
char padding[60]; /**< Pad to cache line */
|
||||
} drm_hw_lock_t;
|
||||
|
||||
|
||||
/* This is beyond ugly, and only works on GCC. However, it allows me to use
|
||||
* drm.h in places (i.e., in the X-server) where I can't use size_t. The real
|
||||
* fix is to use uint32_t instead of size_t, but that fix will break existing
|
||||
|
@ -206,7 +202,6 @@ typedef struct drm_version {
|
|||
char __user *desc; /**< User-space buffer to hold desc */
|
||||
} drm_version_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
|
||||
*
|
||||
|
@ -224,12 +219,10 @@ typedef struct drm_list {
|
|||
drm_version_t __user *version;
|
||||
} drm_list_t;
|
||||
|
||||
|
||||
typedef struct drm_block {
|
||||
int unused;
|
||||
} drm_block_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_CONTROL ioctl argument type.
|
||||
*
|
||||
|
@ -245,7 +238,6 @@ typedef struct drm_control {
|
|||
int irq;
|
||||
} drm_control_t;
|
||||
|
||||
|
||||
/**
|
||||
* Type of memory to map.
|
||||
*/
|
||||
|
@@ -257,7 +249,6 @@ typedef enum drm_map_type {
	_DRM_SCATTER_GATHER = 4	  /**< Scatter/gather memory for PCI DMA */
} drm_map_type_t;

/**
 * Memory mapping flags.
 */

@@ -271,13 +262,11 @@ typedef enum drm_map_flags {
	_DRM_REMOVABLE = 0x40	  /**< Removable mapping */
} drm_map_flags_t;

typedef struct drm_ctx_priv_map {
	unsigned int ctx_id;	/**< Context requesting private mapping */
	void *handle;		/**< Handle of map */
} drm_ctx_priv_map_t;

/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.

@@ -295,7 +284,6 @@ typedef struct drm_map {
	/* Private data */
} drm_map_t;

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */

@@ -308,7 +296,6 @@ typedef struct drm_client {
	unsigned long iocs;	/**< Ioctl count */
} drm_client_t;

typedef enum {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,

@@ -326,11 +313,9 @@ typedef enum {
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	/* Add to the *END* of the list */
} drm_stat_type_t;

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */

@@ -342,7 +327,6 @@ typedef struct drm_stats {
	} data[15];
} drm_stats_t;

/**
 * Hardware locking flags.
 */

@@ -358,7 +342,6 @@ typedef enum drm_lock_flags {
	_DRM_HALT_CUR_QUEUES = 0x20	/**< Halt all current queues */
} drm_lock_flags_t;

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *

@@ -369,7 +352,6 @@ typedef struct drm_lock {
	drm_lock_flags_t flags;
} drm_lock_t;

/**
 * DMA flags
 *
@ -399,7 +381,6 @@ typedef enum drm_dma_flags {
|
|||
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
|
||||
} drm_dma_flags_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
|
||||
*
|
||||
|
@ -421,7 +402,6 @@ typedef struct drm_buf_desc {
|
|||
*/
|
||||
} drm_buf_desc_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_INFO_BUFS ioctl argument type.
|
||||
*/
|
||||
|
@ -430,7 +410,6 @@ typedef struct drm_buf_info {
|
|||
drm_buf_desc_t __user *list; /**< List of buffer descriptions */
|
||||
} drm_buf_info_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_FREE_BUFS ioctl argument type.
|
||||
*/
|
||||
|
@ -439,7 +418,6 @@ typedef struct drm_buf_free {
|
|||
int __user *list;
|
||||
} drm_buf_free_t;
|
||||
|
||||
|
||||
/**
|
||||
* Buffer information
|
||||
*
|
||||
|
@ -452,7 +430,6 @@ typedef struct drm_buf_pub {
|
|||
void __user *address; /**< Address of buffer */
|
||||
} drm_buf_pub_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_MAP_BUFS ioctl argument type.
|
||||
*/
|
||||
|
@ -462,7 +439,6 @@ typedef struct drm_buf_map {
|
|||
drm_buf_pub_t __user *list; /**< Buffer information */
|
||||
} drm_buf_map_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_DMA ioctl argument type.
|
||||
*
|
||||
|
@ -483,13 +459,11 @@ typedef struct drm_dma {
|
|||
int granted_count; /**< Number of buffers granted */
|
||||
} drm_dma_t;
|
||||
|
||||
|
||||
typedef enum {
|
||||
_DRM_CONTEXT_PRESERVED = 0x01,
|
||||
_DRM_CONTEXT_2DONLY = 0x02
|
||||
} drm_ctx_flags_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_ADD_CTX ioctl argument type.
|
||||
*
|
||||
|
@ -500,7 +474,6 @@ typedef struct drm_ctx {
|
|||
drm_ctx_flags_t flags;
|
||||
} drm_ctx_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_RES_CTX ioctl argument type.
|
||||
*/
|
||||
|
@ -509,7 +482,6 @@ typedef struct drm_ctx_res {
|
|||
drm_ctx_t __user *contexts;
|
||||
} drm_ctx_res_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
|
||||
*/
|
||||
|
@ -517,7 +489,6 @@ typedef struct drm_draw {
|
|||
drm_drawable_t handle;
|
||||
} drm_draw_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
|
||||
*/
|
||||
|
@ -525,7 +496,6 @@ typedef struct drm_auth {
|
|||
drm_magic_t magic;
|
||||
} drm_auth_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
|
||||
*
|
||||
|
@ -538,24 +508,20 @@ typedef struct drm_irq_busid {
|
|||
int funcnum; /**< function number */
|
||||
} drm_irq_busid_t;
|
||||
|
||||
|
||||
typedef enum {
|
||||
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
|
||||
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
|
||||
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
|
||||
} drm_vblank_seq_type_t;
|
||||
|
||||
|
||||
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
|
||||
|
||||
|
||||
struct drm_wait_vblank_request {
|
||||
drm_vblank_seq_type_t type;
|
||||
unsigned int sequence;
|
||||
unsigned long signal;
|
||||
};
|
||||
|
||||
|
||||
struct drm_wait_vblank_reply {
|
||||
drm_vblank_seq_type_t type;
|
||||
unsigned int sequence;
|
||||
|
@ -563,7 +529,6 @@ struct drm_wait_vblank_reply {
|
|||
long tval_usec;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
|
||||
*
|
||||
|
@ -574,7 +539,6 @@ typedef union drm_wait_vblank {
|
|||
struct drm_wait_vblank_reply reply;
|
||||
} drm_wait_vblank_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
|
||||
*
|
||||
|
@ -584,7 +548,6 @@ typedef struct drm_agp_mode {
|
|||
unsigned long mode; /**< AGP mode */
|
||||
} drm_agp_mode_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
|
||||
*
|
||||
|
@ -597,7 +560,6 @@ typedef struct drm_agp_buffer {
|
|||
unsigned long physical; /**< Physical used by i810 */
|
||||
} drm_agp_buffer_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
|
||||
*
|
||||
|
@ -608,7 +570,6 @@ typedef struct drm_agp_binding {
|
|||
unsigned long offset; /**< In bytes -- will round to page boundary */
|
||||
} drm_agp_binding_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_AGP_INFO ioctl argument type.
|
||||
*
|
||||
|
@ -632,7 +593,6 @@ typedef struct drm_agp_info {
|
|||
/*@} */
|
||||
} drm_agp_info_t;
|
||||
|
||||
|
||||
/**
|
||||
* DRM_IOCTL_SG_ALLOC ioctl argument type.
|
||||
*/
|
||||
|
@ -651,7 +611,6 @@ typedef struct drm_set_version {
|
|||
int drm_dd_minor;
|
||||
} drm_set_version_t;
|
||||
|
||||
|
||||
/**
|
||||
* \name Ioctls Definitions
|
||||
*/
|
||||
|
@ -719,7 +678,6 @@ typedef struct drm_set_version {
|
|||
|
||||
/*@}*/
|
||||
|
||||
|
||||
/**
|
||||
* Device specific ioctls should only be in their respective headers
|
||||
* The device specific ioctl range is from 0x40 to 0x79.
|
@@ -74,7 +74,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start;

@@ -90,7 +91,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
@@ -36,7 +36,6 @@
#include "mach64_drm.h"
#include "mach64_drv.h"

/* ================================================================
 * Engine, FIFO control
 */

@@ -46,13 +45,14 @@ int mach64_do_wait_for_fifo( drm_mach64_private_t *dev_priv, int entries )
	int slots = 0, i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		slots = (MACH64_READ( MACH64_FIFO_STAT ) &
			 MACH64_FIFO_SLOT_MASK);
		if ( slots <= (0x8000 >> entries) ) return 0;
		slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
		if (slots <= (0x8000 >> entries))
			return 0;
		DRM_UDELAY(1);
	}

	DRM_INFO( "%s failed! slots=%d entries=%d\n", __FUNCTION__, slots, entries );
	DRM_INFO("%s failed! slots=%d entries=%d\n", __FUNCTION__, slots,
		 entries);
	return DRM_ERR(EBUSY);
}
@@ -61,7 +61,8 @@ int mach64_do_wait_for_idle( drm_mach64_private_t *dev_priv )
	int i, ret;

	ret = mach64_do_wait_for_fifo(dev_priv, 16);
	if ( ret < 0 ) return ret;
	if (ret < 0)
		return ret;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {

@@ -161,8 +162,9 @@ int mach64_do_dma_idle( drm_mach64_private_t *dev_priv )

	/* wait for completion */
	if ((ret = mach64_ring_idle(dev_priv)) < 0) {
		DRM_ERROR( "%s failed BM_GUI_TABLE=0x%08x tail: %u\n", __FUNCTION__,
			   MACH64_READ(MACH64_BM_GUI_TABLE), dev_priv->ring.tail );
		DRM_ERROR("%s failed BM_GUI_TABLE=0x%08x tail: %u\n",
			  __FUNCTION__, MACH64_READ(MACH64_BM_GUI_TABLE),
			  dev_priv->ring.tail);
		return ret;
	}
@@ -184,19 +186,16 @@ int mach64_do_engine_reset( drm_mach64_private_t *dev_priv )
	/* Kill off any outstanding DMA transfers.
	 */
	tmp = MACH64_READ(MACH64_BUS_CNTL);
	MACH64_WRITE( MACH64_BUS_CNTL,
		      tmp | MACH64_BUS_MASTER_DIS );
	MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);

	/* Reset the GUI engine (high to low transition).
	 */
	tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
	MACH64_WRITE( MACH64_GEN_TEST_CNTL,
		      tmp & ~MACH64_GUI_ENGINE_ENABLE );
	MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
	/* Enable the GUI engine
	 */
	tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
	MACH64_WRITE( MACH64_GEN_TEST_CNTL,
		      tmp | MACH64_GUI_ENGINE_ENABLE );
	MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);

	/* ensure engine is not locked up by clearing any FIFO or HOST errors
	 */
@ -219,91 +218,142 @@ int mach64_do_engine_reset( drm_mach64_private_t *dev_priv )
|
|||
void mach64_dump_engine_info(drm_mach64_private_t * dev_priv)
|
||||
{
|
||||
DRM_INFO("\n");
|
||||
if ( !dev_priv->is_pci )
|
||||
{
|
||||
DRM_INFO( " AGP_BASE = 0x%08x\n", MACH64_READ( MACH64_AGP_BASE ) );
|
||||
DRM_INFO( " AGP_CNTL = 0x%08x\n", MACH64_READ( MACH64_AGP_CNTL ) );
|
||||
if (!dev_priv->is_pci) {
|
||||
DRM_INFO(" AGP_BASE = 0x%08x\n",
|
||||
MACH64_READ(MACH64_AGP_BASE));
|
||||
DRM_INFO(" AGP_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_AGP_CNTL));
|
||||
}
|
||||
DRM_INFO( " ALPHA_TST_CNTL = 0x%08x\n", MACH64_READ( MACH64_ALPHA_TST_CNTL ) );
|
||||
DRM_INFO(" ALPHA_TST_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_ALPHA_TST_CNTL));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " BM_COMMAND = 0x%08x\n", MACH64_READ( MACH64_BM_COMMAND ) );
|
||||
DRM_INFO( "BM_FRAME_BUF_OFFSET = 0x%08x\n", MACH64_READ( MACH64_BM_FRAME_BUF_OFFSET ) );
|
||||
DRM_INFO( " BM_GUI_TABLE = 0x%08x\n", MACH64_READ( MACH64_BM_GUI_TABLE ) );
|
||||
DRM_INFO( " BM_STATUS = 0x%08x\n", MACH64_READ( MACH64_BM_STATUS ) );
|
||||
DRM_INFO( " BM_SYSTEM_MEM_ADDR = 0x%08x\n", MACH64_READ( MACH64_BM_SYSTEM_MEM_ADDR ) );
|
||||
DRM_INFO( " BM_SYSTEM_TABLE = 0x%08x\n", MACH64_READ( MACH64_BM_SYSTEM_TABLE ) );
|
||||
DRM_INFO( " BUS_CNTL = 0x%08x\n", MACH64_READ( MACH64_BUS_CNTL ) );
|
||||
DRM_INFO(" BM_COMMAND = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_COMMAND));
|
||||
DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
|
||||
DRM_INFO(" BM_GUI_TABLE = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_GUI_TABLE));
|
||||
DRM_INFO(" BM_STATUS = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_STATUS));
|
||||
DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
|
||||
DRM_INFO(" BM_SYSTEM_TABLE = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_SYSTEM_TABLE));
|
||||
DRM_INFO(" BUS_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BUS_CNTL));
|
||||
DRM_INFO("\n");
|
||||
/* DRM_INFO( " CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
|
||||
DRM_INFO( " CLR_CMP_CLR = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_CLR ) );
|
||||
DRM_INFO( " CLR_CMP_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_CNTL ) );
|
||||
DRM_INFO(" CLR_CMP_CLR = 0x%08x\n",
|
||||
MACH64_READ(MACH64_CLR_CMP_CLR));
|
||||
DRM_INFO(" CLR_CMP_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_CLR_CMP_CNTL));
|
||||
/* DRM_INFO( " CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
|
||||
DRM_INFO( " CONFIG_CHIP_ID = 0x%08x\n", MACH64_READ( MACH64_CONFIG_CHIP_ID ) );
|
||||
DRM_INFO( " CONFIG_CNTL = 0x%08x\n", MACH64_READ( MACH64_CONFIG_CNTL ) );
|
||||
DRM_INFO( " CONFIG_STAT0 = 0x%08x\n", MACH64_READ( MACH64_CONFIG_STAT0 ) );
|
||||
DRM_INFO( " CONFIG_STAT1 = 0x%08x\n", MACH64_READ( MACH64_CONFIG_STAT1 ) );
|
||||
DRM_INFO( " CONFIG_STAT2 = 0x%08x\n", MACH64_READ( MACH64_CONFIG_STAT2 ) );
|
||||
DRM_INFO(" CONFIG_CHIP_ID = 0x%08x\n",
|
||||
MACH64_READ(MACH64_CONFIG_CHIP_ID));
|
||||
DRM_INFO(" CONFIG_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_CONFIG_CNTL));
|
||||
DRM_INFO(" CONFIG_STAT0 = 0x%08x\n",
|
||||
MACH64_READ(MACH64_CONFIG_STAT0));
|
||||
DRM_INFO(" CONFIG_STAT1 = 0x%08x\n",
|
||||
MACH64_READ(MACH64_CONFIG_STAT1));
|
||||
DRM_INFO(" CONFIG_STAT2 = 0x%08x\n",
|
||||
MACH64_READ(MACH64_CONFIG_STAT2));
|
||||
DRM_INFO(" CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
|
||||
DRM_INFO( " CUSTOM_MACRO_CNTL = 0x%08x\n", MACH64_READ( MACH64_CUSTOM_MACRO_CNTL ) );
|
||||
DRM_INFO(" CUSTOM_MACRO_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
|
||||
DRM_INFO("\n");
|
||||
/* DRM_INFO( " DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
|
||||
/* DRM_INFO( " DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
|
||||
DRM_INFO( " DP_BKGD_CLR = 0x%08x\n", MACH64_READ( MACH64_DP_BKGD_CLR ) );
|
||||
DRM_INFO( " DP_FRGD_CLR = 0x%08x\n", MACH64_READ( MACH64_DP_FRGD_CLR ) );
|
||||
DRM_INFO(" DP_BKGD_CLR = 0x%08x\n",
|
||||
MACH64_READ(MACH64_DP_BKGD_CLR));
|
||||
DRM_INFO(" DP_FRGD_CLR = 0x%08x\n",
|
||||
MACH64_READ(MACH64_DP_FRGD_CLR));
|
||||
DRM_INFO(" DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
|
||||
DRM_INFO( " DP_PIX_WIDTH = 0x%08x\n", MACH64_READ( MACH64_DP_PIX_WIDTH ) );
|
||||
DRM_INFO(" DP_PIX_WIDTH = 0x%08x\n",
|
||||
MACH64_READ(MACH64_DP_PIX_WIDTH));
|
||||
DRM_INFO(" DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
|
||||
DRM_INFO( " DP_WRITE_MASK = 0x%08x\n", MACH64_READ( MACH64_DP_WRITE_MASK ) );
|
||||
DRM_INFO( " DSP_CONFIG = 0x%08x\n", MACH64_READ( MACH64_DSP_CONFIG ) );
|
||||
DRM_INFO( " DSP_ON_OFF = 0x%08x\n", MACH64_READ( MACH64_DSP_ON_OFF ) );
|
||||
DRM_INFO( " DST_CNTL = 0x%08x\n", MACH64_READ( MACH64_DST_CNTL ) );
|
||||
DRM_INFO( " DST_OFF_PITCH = 0x%08x\n", MACH64_READ( MACH64_DST_OFF_PITCH ) );
|
||||
DRM_INFO(" DP_WRITE_MASK = 0x%08x\n",
|
||||
MACH64_READ(MACH64_DP_WRITE_MASK));
|
||||
DRM_INFO(" DSP_CONFIG = 0x%08x\n",
|
||||
MACH64_READ(MACH64_DSP_CONFIG));
|
||||
DRM_INFO(" DSP_ON_OFF = 0x%08x\n",
|
||||
MACH64_READ(MACH64_DSP_ON_OFF));
|
||||
DRM_INFO(" DST_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_DST_CNTL));
|
||||
DRM_INFO(" DST_OFF_PITCH = 0x%08x\n",
|
||||
MACH64_READ(MACH64_DST_OFF_PITCH));
|
||||
DRM_INFO("\n");
|
||||
/* DRM_INFO( " EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
|
||||
DRM_INFO( " EXT_MEM_CNTL = 0x%08x\n", MACH64_READ( MACH64_EXT_MEM_CNTL ) );
|
||||
DRM_INFO(" EXT_MEM_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_EXT_MEM_CNTL));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " FIFO_STAT = 0x%08x\n", MACH64_READ( MACH64_FIFO_STAT ) );
|
||||
DRM_INFO(" FIFO_STAT = 0x%08x\n",
|
||||
MACH64_READ(MACH64_FIFO_STAT));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " GEN_TEST_CNTL = 0x%08x\n", MACH64_READ( MACH64_GEN_TEST_CNTL ) );
|
||||
DRM_INFO(" GEN_TEST_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_GEN_TEST_CNTL));
|
||||
/* DRM_INFO( " GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
|
||||
DRM_INFO( " GUI_CMDFIFO_DATA = 0x%08x\n", MACH64_READ( MACH64_GUI_CMDFIFO_DATA ) );
|
||||
DRM_INFO( " GUI_CMDFIFO_DEBUG = 0x%08x\n", MACH64_READ( MACH64_GUI_CMDFIFO_DEBUG ) );
|
||||
DRM_INFO( " GUI_CNTL = 0x%08x\n", MACH64_READ( MACH64_GUI_CNTL ) );
|
||||
DRM_INFO( " GUI_STAT = 0x%08x\n", MACH64_READ( MACH64_GUI_STAT ) );
|
||||
DRM_INFO( " GUI_TRAJ_CNTL = 0x%08x\n", MACH64_READ( MACH64_GUI_TRAJ_CNTL ) );
|
||||
DRM_INFO(" GUI_CMDFIFO_DATA = 0x%08x\n",
|
||||
MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
|
||||
DRM_INFO(" GUI_CMDFIFO_DEBUG = 0x%08x\n",
|
||||
MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
|
||||
DRM_INFO(" GUI_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_GUI_CNTL));
|
||||
DRM_INFO(" GUI_STAT = 0x%08x\n",
|
||||
MACH64_READ(MACH64_GUI_STAT));
|
||||
DRM_INFO(" GUI_TRAJ_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_GUI_TRAJ_CNTL));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " HOST_CNTL = 0x%08x\n", MACH64_READ( MACH64_HOST_CNTL ) );
|
||||
DRM_INFO( " HW_DEBUG = 0x%08x\n", MACH64_READ( MACH64_HW_DEBUG ) );
|
||||
DRM_INFO(" HOST_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_HOST_CNTL));
|
||||
DRM_INFO(" HW_DEBUG = 0x%08x\n",
|
||||
MACH64_READ(MACH64_HW_DEBUG));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " MEM_ADDR_CONFIG = 0x%08x\n", MACH64_READ( MACH64_MEM_ADDR_CONFIG ) );
|
||||
DRM_INFO( " MEM_BUF_CNTL = 0x%08x\n", MACH64_READ( MACH64_MEM_BUF_CNTL ) );
|
||||
DRM_INFO(" MEM_ADDR_CONFIG = 0x%08x\n",
|
||||
MACH64_READ(MACH64_MEM_ADDR_CONFIG));
|
||||
DRM_INFO(" MEM_BUF_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_MEM_BUF_CNTL));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " PAT_REG0 = 0x%08x\n", MACH64_READ( MACH64_PAT_REG0 ) );
|
||||
DRM_INFO( " PAT_REG1 = 0x%08x\n", MACH64_READ( MACH64_PAT_REG1 ) );
|
||||
DRM_INFO(" PAT_REG0 = 0x%08x\n",
|
||||
MACH64_READ(MACH64_PAT_REG0));
|
||||
DRM_INFO(" PAT_REG1 = 0x%08x\n",
|
||||
MACH64_READ(MACH64_PAT_REG1));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO(" SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
|
||||
DRM_INFO( " SC_RIGHT = 0x%08x\n", MACH64_READ( MACH64_SC_RIGHT ) );
|
||||
DRM_INFO(" SC_RIGHT = 0x%08x\n",
|
||||
MACH64_READ(MACH64_SC_RIGHT));
|
||||
DRM_INFO(" SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
|
||||
DRM_INFO( " SC_BOTTOM = 0x%08x\n", MACH64_READ( MACH64_SC_BOTTOM ) );
|
||||
DRM_INFO(" SC_BOTTOM = 0x%08x\n",
|
||||
MACH64_READ(MACH64_SC_BOTTOM));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " SCALE_3D_CNTL = 0x%08x\n", MACH64_READ( MACH64_SCALE_3D_CNTL ) );
|
||||
DRM_INFO( " SCRATCH_REG0 = 0x%08x\n", MACH64_READ( MACH64_SCRATCH_REG0 ) );
|
||||
DRM_INFO( " SCRATCH_REG1 = 0x%08x\n", MACH64_READ( MACH64_SCRATCH_REG1 ) );
|
||||
DRM_INFO( " SETUP_CNTL = 0x%08x\n", MACH64_READ( MACH64_SETUP_CNTL ) );
|
||||
DRM_INFO( " SRC_CNTL = 0x%08x\n", MACH64_READ( MACH64_SRC_CNTL ) );
|
||||
DRM_INFO(" SCALE_3D_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_SCALE_3D_CNTL));
|
||||
DRM_INFO(" SCRATCH_REG0 = 0x%08x\n",
|
||||
MACH64_READ(MACH64_SCRATCH_REG0));
|
||||
DRM_INFO(" SCRATCH_REG1 = 0x%08x\n",
|
||||
MACH64_READ(MACH64_SCRATCH_REG1));
|
||||
DRM_INFO(" SETUP_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_SETUP_CNTL));
|
||||
DRM_INFO(" SRC_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_SRC_CNTL));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " TEX_CNTL = 0x%08x\n", MACH64_READ( MACH64_TEX_CNTL ) );
|
||||
DRM_INFO( " TEX_SIZE_PITCH = 0x%08x\n", MACH64_READ( MACH64_TEX_SIZE_PITCH ) );
|
||||
DRM_INFO( " TIMER_CONFIG = 0x%08x\n", MACH64_READ( MACH64_TIMER_CONFIG ) );
|
||||
DRM_INFO(" TEX_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_TEX_CNTL));
|
||||
DRM_INFO(" TEX_SIZE_PITCH = 0x%08x\n",
|
||||
MACH64_READ(MACH64_TEX_SIZE_PITCH));
|
||||
DRM_INFO(" TIMER_CONFIG = 0x%08x\n",
|
||||
MACH64_READ(MACH64_TIMER_CONFIG));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO(" Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
|
||||
DRM_INFO( " Z_OFF_PITCH = 0x%08x\n", MACH64_READ( MACH64_Z_OFF_PITCH ) );
|
||||
DRM_INFO(" Z_OFF_PITCH = 0x%08x\n",
|
||||
MACH64_READ(MACH64_Z_OFF_PITCH));
|
||||
DRM_INFO("\n");
|
||||
}
|
||||
|
||||
#define MACH64_DUMP_CONTEXT	3

static void mach64_dump_buf_info( drm_mach64_private_t *dev_priv, drm_buf_t *buf)
static void mach64_dump_buf_info(drm_mach64_private_t * dev_priv,
				 drm_buf_t * buf)
{
	u32 addr = GETBUFADDR(buf);
	u32 used = buf->used >> 2;
@ -320,7 +370,8 @@ static void mach64_dump_buf_info( drm_mach64_private_t *dev_priv, drm_buf_t *buf
|
|||
if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
|
||||
(addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
|
||||
addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
|
||||
addr >= GETBUFADDR( buf ) + buf->used - MACH64_DUMP_CONTEXT * 4) {
|
||||
addr >=
|
||||
GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
|
||||
DRM_INFO("%08x: 0x%08x\n", addr, reg);
|
||||
}
|
||||
addr += 4;
|
||||
|
@ -333,8 +384,11 @@ static void mach64_dump_buf_info( drm_mach64_private_t *dev_priv, drm_buf_t *buf
|
|||
if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
|
||||
(addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
|
||||
addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
|
||||
addr >= GETBUFADDR( buf ) + buf->used - MACH64_DUMP_CONTEXT * 4) {
|
||||
DRM_INFO("%08x: 0x%04x = 0x%08x\n", addr, reg, le32_to_cpu(*p));
|
||||
addr >=
|
||||
GETBUFADDR(buf) + buf->used -
|
||||
MACH64_DUMP_CONTEXT * 4) {
|
||||
DRM_INFO("%08x: 0x%04x = 0x%08x\n", addr,
|
||||
reg, le32_to_cpu(*p));
|
||||
skipped = 0;
|
||||
} else {
|
||||
if (!skipped) {
|
||||
|
@ -380,8 +434,7 @@ void mach64_dump_ring_info( drm_mach64_private_t *dev_priv )
|
|||
le32_to_cpu(((u32 *) ring->start)[i + 2]),
|
||||
le32_to_cpu(((u32 *) ring->start)[i + 3]),
|
||||
i == ring->head ? " (head)" : "",
|
||||
i == ring->tail ? " (tail)" : ""
|
||||
);
|
||||
i == ring->tail ? " (tail)" : "");
|
||||
skipped = 0;
|
||||
} else {
|
||||
if (!skipped) {
|
||||
|
@ -411,20 +464,28 @@ void mach64_dump_ring_info( drm_mach64_private_t *dev_priv )
|
|||
}
|
||||
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " BM_GUI_TABLE = 0x%08x\n", MACH64_READ( MACH64_BM_GUI_TABLE ) );
|
||||
DRM_INFO(" BM_GUI_TABLE = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_GUI_TABLE));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( "BM_FRAME_BUF_OFFSET = 0x%08x\n", MACH64_READ( MACH64_BM_FRAME_BUF_OFFSET ) );
|
||||
DRM_INFO( " BM_SYSTEM_MEM_ADDR = 0x%08x\n", MACH64_READ( MACH64_BM_SYSTEM_MEM_ADDR ) );
|
||||
DRM_INFO( " BM_COMMAND = 0x%08x\n", MACH64_READ( MACH64_BM_COMMAND ) );
|
||||
DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
|
||||
DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
|
||||
DRM_INFO(" BM_COMMAND = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_COMMAND));
|
||||
DRM_INFO("\n");
|
||||
DRM_INFO( " BM_STATUS = 0x%08x\n", MACH64_READ( MACH64_BM_STATUS ) );
|
||||
DRM_INFO( " BUS_CNTL = 0x%08x\n", MACH64_READ( MACH64_BUS_CNTL ) );
|
||||
DRM_INFO( " FIFO_STAT = 0x%08x\n", MACH64_READ( MACH64_FIFO_STAT ) );
|
||||
DRM_INFO( " GUI_STAT = 0x%08x\n", MACH64_READ( MACH64_GUI_STAT ) );
|
||||
DRM_INFO( " SRC_CNTL = 0x%08x\n", MACH64_READ( MACH64_SRC_CNTL ) );
|
||||
DRM_INFO(" BM_STATUS = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BM_STATUS));
|
||||
DRM_INFO(" BUS_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_BUS_CNTL));
|
||||
DRM_INFO(" FIFO_STAT = 0x%08x\n",
|
||||
MACH64_READ(MACH64_FIFO_STAT));
|
||||
DRM_INFO(" GUI_STAT = 0x%08x\n",
|
||||
MACH64_READ(MACH64_GUI_STAT));
|
||||
DRM_INFO(" SRC_CNTL = 0x%08x\n",
|
||||
MACH64_READ(MACH64_SRC_CNTL));
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* DMA test and initialization
|
||||
*/
|
||||
|
@ -446,7 +507,8 @@ static int mach64_bm_dma_test( drm_device_t *dev )
|
|||
|
||||
/* FIXME: get a dma buffer from the freelist here */
|
||||
DRM_DEBUG("Allocating data memory ...\n");
|
||||
cpu_addr_data = drm_pci_alloc( dev, 0x1000, 0x1000, 0xfffffffful, &data_handle );
|
||||
cpu_addr_data =
|
||||
drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful, &data_handle);
|
||||
if (!cpu_addr_data || !data_handle) {
|
||||
DRM_INFO("data-memory allocation failed!\n");
|
||||
return DRM_ERR(ENOMEM);
|
||||
|
@ -493,7 +555,8 @@ static int mach64_bm_dma_test( drm_device_t *dev )
|
|||
data[count++] = expected[1] = 0xaaaaaaaa;
|
||||
|
||||
while (count < 1020) {
|
||||
data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
|
||||
data[count++] =
|
||||
cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
|
||||
data[count++] = 0x22222222;
|
||||
data[count++] = 0xaaaaaaaa;
|
||||
}
|
||||
|
@ -547,8 +610,7 @@ static int mach64_bm_dma_test( drm_device_t *dev )
|
|||
|
||||
DRM_DEBUG("starting DMA transfer...\n");
|
||||
MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
|
||||
dev_priv->ring.start_addr |
|
||||
MACH64_CIRCULAR_BUF_SIZE_16KB );
|
||||
dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
|
||||
|
||||
MACH64_WRITE(MACH64_SRC_CNTL,
|
||||
MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
|
||||
|
@ -605,7 +667,6 @@ static int mach64_bm_dma_test( drm_device_t *dev )
|
|||
return failed;
|
||||
}
|
||||
|
||||
|
||||
static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
|
||||
{
|
||||
drm_mach64_private_t *dev_priv;
|
||||
|
@ -670,8 +731,7 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
}
|
||||
|
||||
dev_priv->sarea_priv = (drm_mach64_sarea_t *)
|
||||
((u8 *)dev_priv->sarea->handle +
|
||||
init->sarea_priv_offset);
|
||||
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
|
||||
|
||||
if (!dev_priv->is_pci) {
|
||||
dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
|
||||
|
@ -689,7 +749,8 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
mach64_do_cleanup_dma(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
|
||||
dev->agp_buffer_map =
|
||||
drm_core_findmap(dev, init->buffers_offset);
|
||||
if (!dev->agp_buffer_map) {
|
||||
DRM_ERROR("can not find dma buffer map!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -708,7 +769,8 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
mach64_do_cleanup_dma(dev);
|
||||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset);
|
||||
dev_priv->agp_textures =
|
||||
drm_core_findmap(dev, init->agp_textures_offset);
|
||||
if (!dev_priv->agp_textures) {
|
||||
DRM_ERROR("can not find agp texture region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -727,15 +789,17 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
DRM_INFO("Setting FIFO size to 128 entries\n");
|
||||
/* FIFO must be empty to change the FIFO depth */
|
||||
if ((ret = mach64_do_wait_for_idle(dev_priv))) {
|
||||
DRM_ERROR("wait for idle failed before changing FIFO depth!\n");
|
||||
DRM_ERROR
|
||||
("wait for idle failed before changing FIFO depth!\n");
|
||||
mach64_do_cleanup_dma(dev);
|
||||
return ret;
|
||||
}
|
||||
MACH64_WRITE( MACH64_GUI_CNTL, ( ( tmp & ~MACH64_CMDFIFO_SIZE_MASK ) \
|
||||
MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
|
||||
| MACH64_CMDFIFO_SIZE_128));
|
||||
/* need to read GUI_STAT for proper sync according to docs */
|
||||
if ((ret = mach64_do_wait_for_idle(dev_priv))) {
|
||||
DRM_ERROR("wait for idle failed when changing FIFO depth!\n");
|
||||
DRM_ERROR
|
||||
("wait for idle failed when changing FIFO depth!\n");
|
||||
mach64_do_cleanup_dma(dev);
|
||||
return ret;
|
||||
}
|
||||
|
@ -747,7 +811,8 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
|
||||
if (dev_priv->is_pci) {
|
||||
dev_priv->ring.start = drm_pci_alloc(dev, dev_priv->ring.size,
|
||||
dev_priv->ring.size, 0xfffffffful,
|
||||
dev_priv->ring.size,
|
||||
0xfffffffful,
|
||||
&dev_priv->ring.handle);
|
||||
|
||||
if (!dev_priv->ring.start || !dev_priv->ring.handle) {
|
||||
|
@ -769,8 +834,7 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
|
||||
|
||||
/* enable block 1 registers and bus mastering */
|
||||
MACH64_WRITE( MACH64_BUS_CNTL,
|
||||
( ( MACH64_READ(MACH64_BUS_CNTL)
|
||||
MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
|
||||
| MACH64_BUS_EXT_REG_EN)
|
||||
& ~MACH64_BUS_MASTER_DIS));
|
||||
|
||||
|
@ -789,7 +853,9 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
if (init->dma_mode == MACH64_MODE_MMIO)
|
||||
DRM_INFO("Forcing pseudo-DMA mode\n");
|
||||
else
|
||||
DRM_INFO( "DMA test failed (ret=%d), using pseudo-DMA mode\n", ret );
|
||||
DRM_INFO
|
||||
("DMA test failed (ret=%d), using pseudo-DMA mode\n",
|
||||
ret);
|
||||
break;
|
||||
case MACH64_MODE_DMA_SYNC:
|
||||
DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
|
||||
|
@ -810,7 +876,8 @@ static int mach64_do_dma_init( drm_device_t *dev, drm_mach64_init_t *init )
|
|||
/* setup physical address and size of descriptor table */
|
||||
mach64_do_wait_for_fifo(dev_priv, 1);
|
||||
MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
|
||||
( dev_priv->ring.head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB ) );
|
||||
(dev_priv->ring.
|
||||
head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
|
||||
|
||||
/* init frame counter */
|
||||
dev_priv->sarea_priv->frames_queued = 0;
|
||||
|
@ -847,7 +914,8 @@ int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv )
|
|||
target = MACH64_BM_ADDR;
|
||||
|
||||
if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
|
||||
DRM_INFO( "%s: idle failed before pseudo-dma dispatch, resetting engine\n",
|
||||
DRM_INFO
|
||||
("%s: idle failed before pseudo-dma dispatch, resetting engine\n",
|
||||
__FUNCTION__);
|
||||
mach64_dump_engine_info(dev_priv);
|
||||
mach64_do_engine_reset(dev_priv);
|
||||
|
@ -862,7 +930,8 @@ int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv )
|
|||
|
||||
head = ring->head;
|
||||
|
||||
new_target = le32_to_cpu( ring_read[head++] ) - MACH64_APERTURE_OFFSET;
|
||||
new_target =
|
||||
le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
|
||||
buf_addr = le32_to_cpu(ring_read[head++]);
|
||||
eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
|
||||
bytes = le32_to_cpu(ring_read[head++])
|
||||
|
@ -873,15 +942,15 @@ int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv )
|
|||
/* can't wait for idle between a blit setup descriptor
|
||||
* and a HOSTDATA descriptor or the engine will lock
|
||||
*/
|
||||
if (new_target == MACH64_BM_HOSTDATA && target == MACH64_BM_ADDR)
|
||||
if (new_target == MACH64_BM_HOSTDATA
|
||||
&& target == MACH64_BM_ADDR)
|
||||
no_idle_wait = 1;
|
||||
|
||||
target = new_target;
|
||||
|
||||
found = 0;
|
||||
offset = 0;
|
||||
list_for_each(ptr, &dev_priv->pending)
|
||||
{
|
||||
list_for_each(ptr, &dev_priv->pending) {
|
||||
entry = list_entry(ptr, drm_mach64_freelist_t, list);
|
||||
buf = entry->buf;
|
||||
offset = buf_addr - GETBUFADDR(buf);
|
||||
|
@ -892,7 +961,8 @@ int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv )
|
|||
}
|
||||
|
||||
if (!found || buf == NULL) {
|
||||
DRM_ERROR("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
|
||||
DRM_ERROR
|
||||
("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
|
||||
head, ring->tail, buf_addr, (eol ? "eol" : ""));
|
||||
mach64_dump_ring_info(dev_priv);
|
||||
mach64_do_engine_reset(dev_priv);
|
||||
|
@ -903,8 +973,10 @@ int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv )
|
|||
* every 16 writes
|
||||
*/
|
||||
DRM_DEBUG("target: (0x%08x) %s\n", target,
|
||||
(target == MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
|
||||
DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes, buf->used);
|
||||
(target ==
|
||||
MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
|
||||
DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
|
||||
buf->used);
|
||||
|
||||
remaining = (buf->used - offset) >> 2; /* dwords remaining in buffer */
|
||||
used = bytes >> 2; /* dwords in buffer for this descriptor */
|
||||
|
@ -915,7 +987,8 @@ int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv )
|
|||
if (count == 0) {
|
||||
if (target == MACH64_BM_HOSTDATA) {
|
||||
reg = DMAREG(MACH64_HOST_DATA0);
|
||||
count = (remaining > 16) ? 16 : remaining;
|
||||
count =
|
||||
(remaining > 16) ? 16 : remaining;
|
||||
fifo = 0;
|
||||
} else {
|
||||
reg = le32_to_cpu(*buf_ptr++);
|
||||
|
@ -929,12 +1002,16 @@ int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv )
|
|||
while (count && used) {
|
||||
if (!fifo) {
|
||||
if (no_idle_wait) {
|
||||
if ( (ret=mach64_do_wait_for_fifo( dev_priv, 16 )) < 0 ) {
|
||||
if ((ret =
|
||||
mach64_do_wait_for_fifo
|
||||
(dev_priv, 16)) < 0) {
|
||||
no_idle_wait = 0;
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
if ( (ret=mach64_do_wait_for_idle( dev_priv )) < 0 ) {
|
||||
if ((ret =
|
||||
mach64_do_wait_for_idle
|
||||
(dev_priv)) < 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -942,7 +1019,8 @@ int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv )
|
|||
}
|
||||
--fifo;
|
||||
MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
|
||||
used--; remaining--;
|
||||
used--;
|
||||
remaining--;
|
||||
|
||||
reg += 4;
|
||||
count--;
|
||||
|
@ -975,15 +1053,18 @@ int mach64_do_cleanup_dma( drm_device_t *dev )
|
|||
* may not have been called from userspace and after dev_private
|
||||
* is freed, it's too late.
|
||||
*/
|
||||
if ( dev->irq ) drm_irq_uninstall(dev);
|
||||
if (dev->irq)
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
if (dev->dev_private) {
|
||||
drm_mach64_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if (dev_priv->is_pci) {
|
||||
if ( (dev_priv->ring.start != NULL) && dev_priv->ring.handle ) {
|
||||
if ((dev_priv->ring.start != NULL)
|
||||
&& dev_priv->ring.handle) {
|
||||
drm_pci_free(dev, dev_priv->ring.size,
|
||||
dev_priv->ring.start, dev_priv->ring.handle );
|
||||
dev_priv->ring.start,
|
||||
dev_priv->ring.handle);
|
||||
}
|
||||
} else {
|
||||
if (dev_priv->ring_map)
|
||||
|
@ -1067,7 +1148,6 @@ int mach64_engine_reset( DRM_IOCTL_ARGS )
|
|||
return mach64_do_engine_reset(dev_priv);
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Freelist management
|
||||
*/
|
||||
|
@ -1080,11 +1160,13 @@ int mach64_init_freelist( drm_device_t *dev )
|
|||
struct list_head *ptr;
|
||||
int i;
|
||||
|
||||
DRM_DEBUG("%s: adding %d buffers to freelist\n", __FUNCTION__, dma->buf_count);
|
||||
DRM_DEBUG("%s: adding %d buffers to freelist\n", __FUNCTION__,
|
||||
dma->buf_count);
|
||||
|
||||
for (i = 0; i < dma->buf_count; i++) {
|
||||
if ((entry =
|
||||
(drm_mach64_freelist_t *) drm_alloc(sizeof(drm_mach64_freelist_t),
|
||||
(drm_mach64_freelist_t *)
|
||||
drm_alloc(sizeof(drm_mach64_freelist_t),
|
||||
DRM_MEM_BUFLISTS)) == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
memset(entry, 0, sizeof(drm_mach64_freelist_t));
|
||||
|
@ -1105,21 +1187,18 @@ void mach64_destroy_freelist( drm_device_t *dev )
|
|||
|
||||
DRM_DEBUG("%s\n", __FUNCTION__);
|
||||
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->pending)
|
||||
{
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->pending) {
|
||||
list_del(ptr);
|
||||
entry = list_entry(ptr, drm_mach64_freelist_t, list);
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
|
||||
}
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->placeholders)
|
||||
{
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
|
||||
list_del(ptr);
|
||||
entry = list_entry(ptr, drm_mach64_freelist_t, list);
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
|
||||
}
|
||||
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->free_list)
|
||||
{
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
|
||||
list_del(ptr);
|
||||
entry = list_entry(ptr, drm_mach64_freelist_t, list);
|
||||
drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
|
||||
|
@ -1141,8 +1220,7 @@ int mach64_do_release_used_buffers( drm_mach64_private_t *dev_priv )
|
|||
|
||||
/* Iterate the pending list and move all buffers into the freelist... */
|
||||
i = 0;
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->pending)
|
||||
{
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->pending) {
|
||||
entry = list_entry(ptr, drm_mach64_freelist_t, list);
|
||||
if (entry->discard) {
|
||||
entry->buf->pending = 0;
|
||||
|
@ -1152,7 +1230,8 @@ int mach64_do_release_used_buffers( drm_mach64_private_t *dev_priv )
|
|||
}
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%s: released %d buffers from pending list\n", __FUNCTION__, i );
|
||||
DRM_DEBUG("%s: released %d buffers from pending list\n", __FUNCTION__,
|
||||
i);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1169,7 +1248,8 @@ drm_buf_t *mach64_freelist_get( drm_mach64_private_t *dev_priv )
|
|||
u32 head, tail, ofs;
|
||||
|
||||
if (list_empty(&dev_priv->pending)) {
|
||||
DRM_ERROR( "Couldn't get buffer - pending and free lists empty\n" );
|
||||
DRM_ERROR
|
||||
("Couldn't get buffer - pending and free lists empty\n");
|
||||
t = 0;
|
||||
list_for_each(ptr, &dev_priv->placeholders) {
|
||||
t++;
|
||||
|
@ -1185,17 +1265,22 @@ drm_buf_t *mach64_freelist_get( drm_mach64_private_t *dev_priv )
|
|||
|
||||
if (head == tail) {
|
||||
#if MACH64_EXTRA_CHECKING
|
||||
if ( MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE ) {
|
||||
DRM_ERROR( "Empty ring with non-idle engine!\n" );
|
||||
if (MACH64_READ(MACH64_GUI_STAT) &
|
||||
MACH64_GUI_ACTIVE) {
|
||||
DRM_ERROR
|
||||
("Empty ring with non-idle engine!\n");
|
||||
mach64_dump_ring_info(dev_priv);
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
/* last pass is complete, so release everything */
|
||||
mach64_do_release_used_buffers(dev_priv);
|
||||
DRM_DEBUG( "%s: idle engine, freed all buffers.\n", __FUNCTION__ );
|
||||
DRM_DEBUG
|
||||
("%s: idle engine, freed all buffers.\n",
|
||||
__FUNCTION__);
|
||||
if (list_empty(&dev_priv->free_list)) {
|
||||
DRM_ERROR( "Freelist empty with idle engine\n" );
|
||||
DRM_ERROR
|
||||
("Freelist empty with idle engine\n");
|
||||
return NULL;
|
||||
}
|
||||
goto _freelist_entry_found;
|
||||
|
@ -1205,22 +1290,32 @@ drm_buf_t *mach64_freelist_get( drm_mach64_private_t *dev_priv )
|
|||
* to free extra bufs here, leave that to do_release_used_buffers
|
||||
*/
|
||||
list_for_each_safe(ptr, tmp, &dev_priv->pending) {
|
||||
entry = list_entry(ptr, drm_mach64_freelist_t, list);
|
||||
entry =
|
||||
list_entry(ptr, drm_mach64_freelist_t,
|
||||
list);
|
||||
ofs = entry->ring_ofs;
|
||||
if (entry->discard &&
|
||||
((head < tail && (ofs < head || ofs >= tail)) ||
|
||||
(head > tail && (ofs < head && ofs >= tail))) ) {
|
||||
((head < tail
|
||||
&& (ofs < head || ofs >= tail))
|
||||
|| (head > tail
|
||||
&& (ofs < head && ofs >= tail)))) {
|
||||
#if MACH64_EXTRA_CHECKING
|
||||
int i;
|
||||
|
||||
for ( i = head ; i != tail ; i = (i + 4) & ring->tail_mask ) {
|
||||
u32 o1 = le32_to_cpu(((u32 *)ring->start)[i + 1]);
|
||||
for (i = head; i != tail;
|
||||
i = (i + 4) & ring->tail_mask) {
|
||||
u32 o1 =
|
||||
le32_to_cpu(((u32 *) ring->
|
||||
start)[i + 1]);
|
||||
u32 o2 = GETBUFADDR(entry->buf);
|
||||
|
||||
if (o1 == o2) {
|
||||
DRM_ERROR ( "Attempting to free used buffer: "
|
||||
"i=%d buf=0x%08x\n", i, o1 );
|
||||
mach64_dump_ring_info( dev_priv );
|
||||
DRM_ERROR
|
||||
("Attempting to free used buffer: "
|
||||
"i=%d buf=0x%08x\n",
|
||||
i, o1);
|
||||
mach64_dump_ring_info
|
||||
(dev_priv);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
@ -1229,16 +1324,20 @@ drm_buf_t *mach64_freelist_get( drm_mach64_private_t *dev_priv )
|
|||
entry->buf->pending = 0;
|
||||
list_del(ptr);
|
||||
entry->buf->used = 0;
|
||||
list_add_tail(ptr, &dev_priv->placeholders);
|
||||
DRM_DEBUG( "%s: freed processed buffer (head=%d tail=%d "
|
||||
"buf ring ofs=%d).\n", __FUNCTION__, head, tail, ofs );
|
||||
list_add_tail(ptr,
|
||||
&dev_priv->placeholders);
|
||||
DRM_DEBUG
|
||||
("%s: freed processed buffer (head=%d tail=%d "
|
||||
"buf ring ofs=%d).\n",
|
||||
__FUNCTION__, head, tail, ofs);
|
||||
return entry->buf;
|
||||
}
|
||||
}
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
mach64_dump_ring_info(dev_priv);
|
||||
DRM_ERROR( "timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
|
||||
DRM_ERROR
|
||||
("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
|
||||
ring->head_addr, ring->head, ring->tail);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -1256,7 +1355,8 @@ _freelist_entry_found:
|
|||
* DMA buffer request and submission IOCTL handler
|
||||
*/
|
||||
|
||||
static int mach64_dma_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
|
||||
static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev,
|
||||
drm_dma_t * d)
|
||||
{
|
||||
int i;
|
||||
drm_buf_t *buf;
|
||||
|
@ -1265,9 +1365,11 @@ static int mach64_dma_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d
|
|||
for (i = d->granted_count; i < d->request_count; i++) {
|
||||
buf = mach64_freelist_get(dev_priv);
|
||||
#if MACH64_EXTRA_CHECKING
|
||||
if ( !buf ) return DRM_ERR(EFAULT);
|
||||
if (!buf)
|
||||
return DRM_ERR(EFAULT);
|
||||
#else
|
||||
if ( !buf ) return DRM_ERR(EAGAIN);
|
||||
if (!buf)
|
||||
return DRM_ERR(EAGAIN);
|
||||
#endif
|
||||
|
||||
buf->filp = filp;
|
||||
|
@ -1297,8 +1399,7 @@ int mach64_dma_buffers( DRM_IOCTL_ARGS )
|
|||
|
||||
/* Please don't send us buffers.
|
||||
*/
|
||||
if ( d.send_count != 0 )
|
||||
{
|
||||
if (d.send_count != 0) {
|
||||
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
|
||||
DRM_CURRENTPID, d.send_count);
|
||||
return DRM_ERR(EINVAL);
|
||||
|
@ -1306,8 +1407,7 @@ int mach64_dma_buffers( DRM_IOCTL_ARGS )
|
|||
|
||||
/* We'll send you buffers.
|
||||
*/
|
||||
if ( d.request_count < 0 || d.request_count > dma->buf_count )
|
||||
{
|
||||
if (d.request_count < 0 || d.request_count > dma->buf_count) {
|
||||
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
|
||||
DRM_CURRENTPID, d.request_count, dma->buf_count);
|
||||
ret = DRM_ERR(EINVAL);
|
||||
|
@ -1315,8 +1415,7 @@ int mach64_dma_buffers( DRM_IOCTL_ARGS )
|
|||
|
||||
d.granted_count = 0;
|
||||
|
||||
if ( d.request_count )
|
||||
{
|
||||
if (d.request_count) {
|
||||
ret = mach64_dma_get_buffers(filp, dev, &d);
|
||||
}
|
||||
|
@@ -33,7 +33,6 @@
#ifndef __MACH64_DRM_H__
#define __MACH64_DRM_H__

/* WARNING: If you change any of these defines, make sure to change the
 * defines in the Xserver file (mach64_sarea.h)
 */

@@ -78,7 +77,6 @@
 */
#define MACH64_NR_SAREA_CLIPRECTS	8

#define MACH64_CARD_HEAP		0
#define MACH64_AGP_HEAP			1
#define MACH64_NR_TEX_HEAPS		2

@@ -140,12 +138,12 @@ typedef struct drm_mach64_sarea {

	/* Texture memory LRU.
	 */
	drm_tex_region_t tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS+1];
	drm_tex_region_t tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
						       1];
	unsigned int tex_age[MACH64_NR_TEX_HEAPS];
	int ctx_owner;
} drm_mach64_sarea_t;

/* WARNING: If you change any of these defines, make sure to change the
 * defines in the Xserver file (mach64_common.h)
 */

@@ -193,7 +191,6 @@ typedef struct drm_mach64_sarea {
#define MACH64_PRIM_QUAD_STRIP		0x00000008
#define MACH64_PRIM_POLYGON		0x00000009

typedef enum _drm_mach64_dma_mode_t {
	MACH64_MODE_DMA_ASYNC,
	MACH64_MODE_DMA_SYNC,
@@ -140,7 +140,8 @@ extern int mach64_dma_swap( DRM_IOCTL_ARGS );
extern int mach64_dma_vertex(DRM_IOCTL_ARGS);
extern int mach64_dma_blit(DRM_IOCTL_ARGS);
extern int mach64_get_param(DRM_IOCTL_ARGS);
extern int mach64_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern int mach64_driver_vblank_wait(drm_device_t * dev,
				     unsigned int *sequence);

extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS);
extern void mach64_driver_irq_preinstall(drm_device_t * dev);
@ -155,7 +156,6 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
|
|||
#define MACH64_AGP_CNTL 0x014c
|
||||
#define MACH64_ALPHA_TST_CNTL 0x0550
|
||||
|
||||
|
||||
#define MACH64_DSP_CONFIG 0x0420
|
||||
#define MACH64_DSP_ON_OFF 0x0424
|
||||
#define MACH64_EXT_MEM_CNTL 0x04ac
|
||||
|
@ -165,7 +165,6 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
|
|||
#define MACH64_MEM_BUF_CNTL 0x042c
|
||||
#define MACH64_MEM_CNTL 0x04b0
|
||||
|
||||
|
||||
#define MACH64_BM_ADDR 0x0648
|
||||
#define MACH64_BM_COMMAND 0x0188
|
||||
#define MACH64_BM_DATA 0x0648
|
||||
|
@ -468,7 +467,6 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
|
|||
#define MACH64_READ(reg) DRM_READ32(dev_priv->mmio, (reg) )
|
||||
#define MACH64_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio, (reg), (val) )
|
||||
|
||||
|
||||
#define DWMREG0 0x0400
|
||||
#define DWMREG0_END 0x07ff
|
||||
#define DWMREG1 0x0000
|
||||
|
@ -507,7 +505,6 @@ extern void mach64_driver_irq_uninstall( drm_device_t *dev );
|
|||
#define MACH64_DMA_CHUNKSIZE 0x1000 /* 4kB per DMA descriptor */
|
||||
#define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Misc helper macros
|
||||
*/
|
||||
|
@ -518,9 +515,7 @@ static __inline__ void mach64_set_dma_eol( volatile u32 * addr )
|
|||
int nr = 31;
|
||||
|
||||
/* Taken from include/asm-i386/bitops.h linux header */
|
||||
__asm__ __volatile__( "lock;"
|
||||
"btsl %1,%0"
|
||||
:"=m" (*addr)
|
||||
__asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
|
||||
:"Ir"(nr));
|
||||
#elif defined(__powerpc__)
|
||||
u32 old;
|
||||
|
@ -531,8 +526,7 @@ static __inline__ void mach64_set_dma_eol( volatile u32 * addr )
|
|||
1: lwarx %0,0,%3 \n\
|
||||
or %0,%0,%2 \n\
|
||||
stwcx. %0,0,%3 \n\
|
||||
bne- 1b"
|
||||
: "=&r" (old), "=m" (*addr)
|
||||
bne- 1b":"=&r"(old), "=m"(*addr)
|
||||
:"r"(mask), "r"(addr), "m"(*addr)
|
||||
:"cc");
|
||||
#elif defined(__alpha__)
|
||||
|
@ -540,15 +534,13 @@ static __inline__ void mach64_set_dma_eol( volatile u32 * addr )
|
|||
u32 mask = MACH64_DMA_EOL;
|
||||
|
||||
/* Taken from the include/asm-alpha/bitops.h linux header */
|
||||
__asm__ __volatile__(
|
||||
"1: ldl_l %0,%3\n"
|
||||
__asm__ __volatile__("1: ldl_l %0,%3\n"
|
||||
" bis %0,%2,%0\n"
|
||||
" stl_c %0,%1\n"
|
||||
" beq %0,2f\n"
|
||||
".subsection 2\n"
|
||||
"2: br 1b\n"
|
||||
".previous"
|
||||
:"=&r" (temp), "=m" (*addr)
|
||||
".previous":"=&r"(temp), "=m"(*addr)
|
||||
:"Ir"(mask), "m"(*addr));
|
||||
#else
|
||||
u32 mask = cpu_to_le32(MACH64_DMA_EOL);
|
||||
|
@ -563,9 +555,7 @@ static __inline__ void mach64_clear_dma_eol( volatile u32 * addr )
|
|||
int nr = 31;
|
||||
|
||||
/* Taken from include/asm-i386/bitops.h linux header */
|
||||
__asm__ __volatile__( "lock;"
|
||||
"btrl %1,%0"
|
||||
:"=m" (*addr)
|
||||
__asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
|
||||
:"Ir"(nr));
|
||||
#elif defined(__powerpc__)
|
||||
u32 old;
|
||||
|
@ -576,8 +566,7 @@ static __inline__ void mach64_clear_dma_eol( volatile u32 * addr )
|
|||
1: lwarx %0,0,%3 \n\
|
||||
andc %0,%0,%2 \n\
|
||||
stwcx. %0,0,%3 \n\
|
||||
bne- 1b"
|
||||
: "=&r" (old), "=m" (*addr)
|
||||
bne- 1b":"=&r"(old), "=m"(*addr)
|
||||
:"r"(mask), "r"(addr), "m"(*addr)
|
||||
:"cc");
|
||||
#elif defined(__alpha__)
|
||||
|
@ -585,15 +574,13 @@ static __inline__ void mach64_clear_dma_eol( volatile u32 * addr )
|
|||
u32 mask = ~MACH64_DMA_EOL;
|
||||
|
||||
/* Taken from the include/asm-alpha/bitops.h linux header */
|
||||
__asm__ __volatile__(
|
||||
"1: ldl_l %0,%3\n"
|
||||
__asm__ __volatile__("1: ldl_l %0,%3\n"
|
||||
" and %0,%2,%0\n"
|
||||
" stl_c %0,%1\n"
|
||||
" beq %0,2f\n"
|
||||
".subsection 2\n"
|
||||
"2: br 1b\n"
|
||||
".previous"
|
||||
:"=&r" (temp), "=m" (*addr)
|
||||
".previous":"=&r"(temp), "=m"(*addr)
|
||||
:"Ir"(mask), "m"(*addr));
|
||||
#else
|
||||
u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
|
||||
|
@ -617,7 +604,8 @@ static __inline__ void mach64_ring_start( drm_mach64_private_t *dev_priv )
|
|||
if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
|
||||
/* enable bus mastering and block 1 registers */
|
||||
MACH64_WRITE(MACH64_BUS_CNTL,
|
||||
( MACH64_READ(MACH64_BUS_CNTL) & ~MACH64_BUS_MASTER_DIS )
|
||||
(MACH64_READ(MACH64_BUS_CNTL) &
|
||||
~MACH64_BUS_MASTER_DIS)
|
||||
| MACH64_BUS_EXT_REG_EN);
|
||||
mach64_do_wait_for_idle(dev_priv);
|
||||
}
|
||||
|
@ -680,7 +668,8 @@ static __inline__ void mach64_ring_tick( drm_mach64_private_t *dev_priv,
|
|||
/* GUI_ACTIVE must be read before BM_GUI_TABLE to
|
||||
* correctly determine the ring head
|
||||
*/
|
||||
int gui_active = MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE;
|
||||
int gui_active =
|
||||
MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE;
|
||||
|
||||
ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0;
|
||||
|
||||
|
@ -696,7 +685,8 @@ static __inline__ void mach64_ring_tick( drm_mach64_private_t *dev_priv,
|
|||
|
||||
if (ring->head_addr < ring->start_addr ||
|
||||
ring->head_addr >= ring->start_addr + ring->size) {
|
||||
DRM_ERROR( "bad ring head address: 0x%08x\n", ring->head_addr );
|
||||
DRM_ERROR("bad ring head address: 0x%08x\n",
|
||||
ring->head_addr);
|
||||
mach64_dump_ring_info(dev_priv);
|
||||
mach64_do_engine_reset(dev_priv);
|
||||
return;
|
||||
|
@ -797,7 +787,6 @@ do { \
|
|||
mach64_ring_tick( dev_priv, &(dev_priv)->ring ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* DMA macros
|
||||
*/
|
||||
|
@ -816,9 +805,10 @@ do { \
|
|||
|
||||
#define GETRINGOFFSET() (_entry->ring_ofs)
|
||||
|
||||
static __inline__ int mach64_find_pending_buf_entry ( drm_mach64_private_t *dev_priv,
|
||||
drm_mach64_freelist_t **entry,
|
||||
drm_buf_t *buf )
|
||||
static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
|
||||
dev_priv,
|
||||
drm_mach64_freelist_t **
|
||||
entry, drm_buf_t * buf)
|
||||
{
|
||||
struct list_head *ptr;
|
||||
#if MACH64_EXTRA_CHECKING
|
||||
|
|
|
@@ -57,7 +57,8 @@ irqreturn_t mach64_driver_irq_handler( DRM_IRQ_ARGS )
	 * the ack, despite what the docs say about not acking and enabling
	 * in a single write.
	 */
	MACH64_WRITE( MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_INT_ACKS)
	MACH64_WRITE(MACH64_CRTC_INT_CNTL,
		     (status & ~MACH64_CRTC_INT_ACKS)
		     | MACH64_CRTC_VBLANK_INT);

	atomic_inc(&dev->vbl_received);

@@ -88,7 +89,8 @@ int mach64_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)

/* drm_dma.h hooks
 */
void mach64_driver_irq_preinstall( drm_device_t *dev ) {
void mach64_driver_irq_preinstall(drm_device_t * dev)
{
	drm_mach64_private_t *dev_priv =
	    (drm_mach64_private_t *) dev->dev_private;

@@ -101,7 +103,8 @@ void mach64_driver_irq_preinstall( drm_device_t *dev ) {
		     | MACH64_CRTC_VBLANK_INT);
}

void mach64_driver_irq_postinstall( drm_device_t *dev ) {
void mach64_driver_irq_postinstall(drm_device_t * dev)
{
	drm_mach64_private_t *dev_priv =
	    (drm_mach64_private_t *) dev->dev_private;

@@ -109,11 +112,13 @@ void mach64_driver_irq_postinstall( drm_device_t *dev ) {
	MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
		     | MACH64_CRTC_VBLANK_INT_EN);

	DRM_DEBUG("after install CRTC_INT_CTNL: 0x%08x\n", MACH64_READ( MACH64_CRTC_INT_CNTL ));
	DRM_DEBUG("after install CRTC_INT_CTNL: 0x%08x\n",
		  MACH64_READ(MACH64_CRTC_INT_CNTL));

}

void mach64_driver_irq_uninstall( drm_device_t *dev ) {
void mach64_driver_irq_uninstall(drm_device_t * dev)
{
	drm_mach64_private_t *dev_priv =
	    (drm_mach64_private_t *) dev->dev_private;
	if (!dev_priv)

@@ -121,7 +126,8 @@ void mach64_driver_irq_uninstall( drm_device_t *dev ) {

	/* Disable and clear VBLANK interrupt */
	MACH64_WRITE(MACH64_CRTC_INT_CNTL,
		     (MACH64_READ( MACH64_CRTC_INT_CNTL ) & ~MACH64_CRTC_VBLANK_INT_EN)
		     (MACH64_READ(MACH64_CRTC_INT_CNTL) &
		      ~MACH64_CRTC_VBLANK_INT_EN)
		     | MACH64_CRTC_VBLANK_INT);

	DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n",
@ -34,7 +34,6 @@
|
|||
#include "mach64_drm.h"
|
||||
#include "mach64_drv.h"
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* DMA hardware state programming functions
|
||||
*/
|
||||
|
@ -44,11 +43,13 @@ static void mach64_print_dirty( const char *msg, unsigned int flags )
|
|||
DRM_DEBUG("%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s\n",
|
||||
msg,
|
||||
flags,
|
||||
(flags & MACH64_UPLOAD_DST_OFF_PITCH) ? "dst_off_pitch, " : "",
|
||||
(flags & MACH64_UPLOAD_DST_OFF_PITCH) ? "dst_off_pitch, " :
|
||||
"",
|
||||
(flags & MACH64_UPLOAD_Z_ALPHA_CNTL) ? "z_alpha_cntl, " : "",
|
||||
(flags & MACH64_UPLOAD_SCALE_3D_CNTL) ? "scale_3d_cntl, " : "",
|
||||
(flags & MACH64_UPLOAD_DP_FOG_CLR) ? "dp_fog_clr, " : "",
|
||||
(flags & MACH64_UPLOAD_DP_WRITE_MASK) ? "dp_write_mask, " : "",
|
||||
(flags & MACH64_UPLOAD_SCALE_3D_CNTL) ? "scale_3d_cntl, " :
|
||||
"", (flags & MACH64_UPLOAD_DP_FOG_CLR) ? "dp_fog_clr, " : "",
|
||||
(flags & MACH64_UPLOAD_DP_WRITE_MASK) ? "dp_write_mask, " :
|
||||
"",
|
||||
(flags & MACH64_UPLOAD_DP_PIX_WIDTH) ? "dp_pix_width, " : "",
|
||||
(flags & MACH64_UPLOAD_SETUP_CNTL) ? "setup_cntl, " : "",
|
||||
(flags & MACH64_UPLOAD_MISC) ? "misc, " : "",
|
||||
|
@ -85,13 +86,19 @@ static int mach64_emit_cliprect( DRMFILE filp, drm_mach64_private_t *dev_priv,
|
|||
scissor.y2 = (regs->sc_top_bottom & 0xffff0000) >> 16;
|
||||
|
||||
/* Intersect GL scissor with cliprect */
|
||||
if ( box->x1 > scissor.x1 ) scissor.x1 = box->x1;
|
||||
if ( box->y1 > scissor.y1 ) scissor.y1 = box->y1;
|
||||
if ( box->x2 < scissor.x2 ) scissor.x2 = box->x2;
|
||||
if ( box->y2 < scissor.y2 ) scissor.y2 = box->y2;
|
||||
if (box->x1 > scissor.x1)
|
||||
scissor.x1 = box->x1;
|
||||
if (box->y1 > scissor.y1)
|
||||
scissor.y1 = box->y1;
|
||||
if (box->x2 < scissor.x2)
|
||||
scissor.x2 = box->x2;
|
||||
if (box->y2 < scissor.y2)
|
||||
scissor.y2 = box->y2;
|
||||
/* positive return means skip */
|
||||
if ( scissor.x1 >= scissor.x2 ) return 1;
|
||||
if ( scissor.y1 >= scissor.y2 ) return 1;
|
||||
if (scissor.x1 >= scissor.x2)
|
||||
return 1;
|
||||
if (scissor.y1 >= scissor.y2)
|
||||
return 1;
|
||||
|
||||
DMAGETPTR(filp, dev_priv, 2); /* returns on failure to get buffer */
|
||||
|
||||
|
@ -106,7 +113,8 @@ static int mach64_emit_cliprect( DRMFILE filp, drm_mach64_private_t *dev_priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int mach64_emit_state( DRMFILE filp, drm_mach64_private_t *dev_priv )
|
||||
static __inline__ int mach64_emit_state(DRMFILE filp,
|
||||
drm_mach64_private_t * dev_priv)
|
||||
{
|
||||
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
|
||||
drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
|
||||
|
@ -180,7 +188,6 @@ static __inline__ int mach64_emit_state( DRMFILE filp, drm_mach64_private_t *dev
|
|||
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* DMA command dispatch functions
|
||||
*/
|
||||
|
@ -268,17 +275,14 @@ static int mach64_dma_dispatch_clear( DRMFILE filp, drm_device_t *dev,
|
|||
MACH64_FRGD_SRC_FRGD_CLR |
|
||||
MACH64_MONO_SRC_ONE));
|
||||
|
||||
|
||||
}
|
||||
|
||||
if (flags & MACH64_FRONT) {
|
||||
|
||||
DMAOUTREG(MACH64_DST_OFF_PITCH,
|
||||
dev_priv->front_offset_pitch);
|
||||
DMAOUTREG( MACH64_DST_X_Y,
|
||||
(y << 16) | x );
|
||||
DMAOUTREG( MACH64_DST_WIDTH_HEIGHT,
|
||||
(h << 16) | w );
|
||||
DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
|
||||
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
|
||||
|
||||
}
|
||||
|
||||
|
@ -286,10 +290,8 @@ static int mach64_dma_dispatch_clear( DRMFILE filp, drm_device_t *dev,
|
|||
|
||||
DMAOUTREG(MACH64_DST_OFF_PITCH,
|
||||
dev_priv->back_offset_pitch);
|
||||
DMAOUTREG( MACH64_DST_X_Y,
|
||||
(y << 16) | x );
|
||||
DMAOUTREG( MACH64_DST_WIDTH_HEIGHT,
|
||||
(h << 16) | w );
|
||||
DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
|
||||
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
|
||||
|
||||
}
|
||||
|
||||
|
@ -323,10 +325,8 @@ static int mach64_dma_dispatch_clear( DRMFILE filp, drm_device_t *dev,
|
|||
|
||||
DMAOUTREG(MACH64_DST_OFF_PITCH,
|
||||
dev_priv->depth_offset_pitch);
|
||||
DMAOUTREG( MACH64_DST_X_Y,
|
||||
(y << 16) | x );
|
||||
DMAOUTREG( MACH64_DST_WIDTH_HEIGHT,
|
||||
(h << 16) | w );
|
||||
DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
|
||||
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -375,15 +375,12 @@ static int mach64_dma_dispatch_swap( DRMFILE filp, drm_device_t *dev )
DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
(fb_bpp << 4) |
(fb_bpp << 8) |
(fb_bpp << 16) |
(fb_bpp << 28)) );
(fb_bpp << 16) | (fb_bpp << 28)));

DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
DMAOUTREG( MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
MACH64_FRGD_MIX_S) );
DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S));
DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_BKGD_CLR |
MACH64_FRGD_SRC_BLIT |
MACH64_MONO_SRC_ONE) );
MACH64_FRGD_SRC_BLIT | MACH64_MONO_SRC_ONE));

DMAOUTREG(MACH64_SRC_OFF_PITCH, dev_priv->back_offset_pitch);
DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);

@@ -395,8 +392,7 @@ static int mach64_dma_dispatch_swap( DRMFILE filp, drm_device_t *dev )
int h = pbox[i].y2 - y;

DRM_DEBUG("dispatch swap %d,%d-%d,%d\n",
pbox[i].x1, pbox[i].y1,
pbox[i].x2, pbox[i].y2 );
pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2);

DMAOUTREG(MACH64_SRC_WIDTH1, w);
DMAOUTREG(MACH64_SRC_Y_X, (x << 16) | y);

@@ -452,7 +448,8 @@ static int mach64_do_get_frames_queued( drm_mach64_private_t *dev_priv )
if (ofs == ~0 ||
(head < tail && (ofs < head || ofs >= tail)) ||
(head > tail && (ofs < head && ofs >= tail))) {
sarea_priv->frames_queued = (MACH64_MAX_QUEUED_FRAMES - 1) - i;
sarea_priv->frames_queued =
(MACH64_MAX_QUEUED_FRAMES - 1) - i;
dev_priv->frame_ofs[i] = ~0;
}
}
@@ -463,7 +460,8 @@ static int mach64_do_get_frames_queued( drm_mach64_private_t *dev_priv )
/* Copy and verify a client submited buffer.
* FIXME: Make an assembly optimized version
*/
static __inline__ int copy_and_verify_from_user( u32 *to, const u32 *from, unsigned long bytes )
static __inline__ int copy_and_verify_from_user(u32 * to, const u32 * from,
unsigned long bytes)
{
unsigned long n = bytes;	/* dwords remaining in buffer */

@@ -496,19 +494,23 @@ static __inline__ int copy_and_verify_from_user( u32 *to, const u32 *from, unsig
if ((reg >= 0x0190 && reg < 0x01c1) ||
(reg >= 0x01ca && reg <= 0x01cf)) {
*to++ = data;
if ( DRM_COPY_FROM_USER_UNCHECKED( to, from, count << 2 ) ) {
DRM_ERROR( "%s: copy_from_user\n", __FUNCTION__ );
if (DRM_COPY_FROM_USER_UNCHECKED
(to, from, count << 2)) {
DRM_ERROR("%s: copy_from_user\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}
to += count;
} else {
DRM_ERROR( "%s: Got bad command: 0x%04x\n", __FUNCTION__, reg );
DRM_ERROR("%s: Got bad command: 0x%04x\n",
__FUNCTION__, reg);
return DRM_ERR(EACCES);
}

from += count;
} else {
DRM_ERROR( "%s: Got bad command count(=%u) dwords remaining=%lu\n",
DRM_ERROR
("%s: Got bad command count(=%u) dwords remaining=%lu\n",
__FUNCTION__, count, n);
return DRM_ERR(EINVAL);
}
@@ -522,8 +524,9 @@ static __inline__ int copy_and_verify_from_user( u32 *to, const u32 *from, unsig
}
}

static int mach64_dma_dispatch_vertex( DRMFILE filp, drm_device_t *dev, int prim, void *buf,
unsigned long used, int discard )
static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
int prim, void *buf, unsigned long used,
int discard)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;

@@ -547,7 +550,8 @@ static int mach64_dma_dispatch_vertex( DRMFILE filp, drm_device_t *dev, int prim
}

if ((verify_ret =
copy_and_verify_from_user( GETBUFPTR( copy_buf ), buf, used )) == 0 ) {
copy_and_verify_from_user(GETBUFPTR(copy_buf), buf,
used)) == 0) {

copy_buf->used = used;
@@ -555,14 +559,17 @@ static int mach64_dma_dispatch_vertex( DRMFILE filp, drm_device_t *dev, int prim

if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
ret = mach64_emit_state(filp, dev_priv);
if (ret < 0) return ret;
if (ret < 0)
return ret;
}

do {
/* Emit the next cliprect */
if (i < sarea_priv->nbox) {
ret = mach64_emit_cliprect(filp, dev_priv,
&sarea_priv->boxes[i]);
ret =
mach64_emit_cliprect(filp, dev_priv,
&sarea_priv->
boxes[i]);
if (ret < 0) {
/* failed to get buffer */
return ret;
@@ -590,9 +597,12 @@ static int mach64_dma_dispatch_vertex( DRMFILE filp, drm_device_t *dev, int prim
drm_mach64_freelist_t *entry;
#if MACH64_EXTRA_CHECKING
list_for_each(ptr, &dev_priv->pending) {
entry = list_entry(ptr, drm_mach64_freelist_t, list);
entry =
list_entry(ptr, drm_mach64_freelist_t,
list);
if (copy_buf == entry->buf) {
DRM_ERROR( "%s: Trying to release a pending buf\n",
DRM_ERROR
("%s: Trying to release a pending buf\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}

@@ -615,7 +625,6 @@ static int mach64_dma_dispatch_vertex( DRMFILE filp, drm_device_t *dev, int prim
return verify_ret;
}

static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
drm_mach64_blit_t * blit)
{
|
@ -695,11 +704,9 @@ static int mach64_dma_dispatch_blit( DRMFILE filp, drm_device_t *dev,
|
|||
|
||||
DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); /* disable */
|
||||
DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
|
||||
MACH64_DST_X_LEFT_TO_RIGHT
|
||||
| MACH64_DST_Y_TOP_TO_BOTTOM );
|
||||
MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM);
|
||||
|
||||
DMAOUTREG( MACH64_DP_PIX_WIDTH,
|
||||
( blit->format << 0 ) /* dst pix width */
|
||||
DMAOUTREG(MACH64_DP_PIX_WIDTH, (blit->format << 0) /* dst pix width */
|
||||
|(blit->format << 4) /* composite pix width */
|
||||
|(blit->format << 8) /* src pix width */
|
||||
|(blit->format << 16) /* host data pix width */
|
||||
|
@ -707,15 +714,13 @@ static int mach64_dma_dispatch_blit( DRMFILE filp, drm_device_t *dev,
|
|||
);
|
||||
|
||||
DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff); /* enable all planes */
|
||||
DMAOUTREG( MACH64_DP_MIX,
|
||||
MACH64_BKGD_MIX_D
|
||||
| MACH64_FRGD_MIX_S );
|
||||
DMAOUTREG(MACH64_DP_MIX, MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S);
|
||||
DMAOUTREG(MACH64_DP_SRC,
|
||||
MACH64_BKGD_SRC_BKGD_CLR
|
||||
| MACH64_FRGD_SRC_HOST
|
||||
| MACH64_MONO_SRC_ONE );
|
||||
| MACH64_FRGD_SRC_HOST | MACH64_MONO_SRC_ONE);
|
||||
|
||||
DMAOUTREG( MACH64_DST_OFF_PITCH, (blit->pitch << 22) | (blit->offset >> 3) );
|
||||
DMAOUTREG(MACH64_DST_OFF_PITCH,
|
||||
(blit->pitch << 22) | (blit->offset >> 3));
|
||||
DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
|
||||
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);
|
||||
|
||||
|
@@ -755,8 +760,7 @@ int mach64_dma_clear( DRM_IOCTL_ARGS )

/* Make sure we restore the 3D state next time.
*/
sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT |
MACH64_UPLOAD_MISC);
sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
return ret;
}

@@ -778,8 +782,7 @@ int mach64_dma_swap( DRM_IOCTL_ARGS )

/* Make sure we restore the 3D state next time.
*/
sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT |
MACH64_UPLOAD_MISC);
sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
return ret;
}
@@ -804,14 +807,14 @@ int mach64_dma_vertex( DRM_IOCTL_ARGS )
__FUNCTION__, DRM_CURRENTPID,
vertex.buf, vertex.used, vertex.discard);

if ( vertex.prim < 0 ||
vertex.prim > MACH64_PRIM_POLYGON ) {
if (vertex.prim < 0 || vertex.prim > MACH64_PRIM_POLYGON) {
DRM_ERROR("buffer prim %d\n", vertex.prim);
return DRM_ERR(EINVAL);
}

if (vertex.used > MACH64_BUFFER_SIZE || (vertex.used & 3) != 0) {
DRM_ERROR( "Invalid vertex buffer size: %lu bytes\n", vertex.used );
DRM_ERROR("Invalid vertex buffer size: %lu bytes\n",
vertex.used);
return DRM_ERR(EINVAL);
}
@ -850,8 +853,7 @@ int mach64_dma_blit( DRM_IOCTL_ARGS )
|
|||
/* Make sure we restore the 3D state next time.
|
||||
*/
|
||||
sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT |
|
||||
MACH64_UPLOAD_MISC |
|
||||
MACH64_UPLOAD_CLIPRECTS);
|
||||
MACH64_UPLOAD_MISC | MACH64_UPLOAD_CLIPRECTS);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -41,7 +41,6 @@
|
|||
#define MGA_DEFAULT_USEC_TIMEOUT 10000
|
||||
#define MGA_FREELIST_DEBUG 0
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Engine control
|
||||
*/
|
||||
|
@ -76,7 +75,8 @@ int mga_do_dma_idle( drm_mga_private_t *dev_priv )
|
|||
|
||||
for (i = 0; i < dev_priv->usec_timeout; i++) {
|
||||
status = MGA_READ(MGA_STATUS) & MGA_DMA_IDLE_MASK;
|
||||
if ( status == MGA_ENDPRDMASTS ) return 0;
|
||||
if (status == MGA_ENDPRDMASTS)
|
||||
return 0;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
||||
|
@ -131,8 +131,7 @@ int mga_do_engine_reset( drm_mga_private_t *dev_priv )
|
|||
#if 0
|
||||
MGA_WRITE(MGA_PRIMPTR,
|
||||
virt_to_bus((void *)dev_priv->prim.status_page) |
|
||||
MGA_PRIMPTREN0 |
|
||||
MGA_PRIMPTREN1 );
|
||||
MGA_PRIMPTREN0 | MGA_PRIMPTREN1);
|
||||
#endif
|
||||
|
||||
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
|
||||
|
@ -147,7 +146,6 @@ int mga_do_engine_reset( drm_mga_private_t *dev_priv )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Primary DMA stream
|
||||
*/
|
||||
|
@ -164,7 +162,8 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv )
|
|||
/* We need to wait so that we can do an safe flush */
|
||||
for (i = 0; i < dev_priv->usec_timeout; i++) {
|
||||
status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
|
||||
if ( status == MGA_ENDPRDMASTS ) break;
|
||||
if (status == MGA_ENDPRDMASTS)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
||||
|
@ -183,8 +182,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
ADVANCE_DMA();
|
||||
|
||||
|
@ -219,8 +217,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
ADVANCE_DMA();
|
||||
|
||||
|
@ -238,8 +235,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
|
|||
primary->space = head - dev_priv->primary->offset;
|
||||
}
|
||||
|
||||
DRM_DEBUG( " head = 0x%06lx\n",
|
||||
head - dev_priv->primary->offset );
|
||||
DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset);
|
||||
DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
|
||||
DRM_DEBUG(" wrap = %d\n", primary->last_wrap);
|
||||
DRM_DEBUG(" space = 0x%06x\n", primary->space);
|
||||
|
@ -268,7 +264,6 @@ void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv )
|
|||
DRM_DEBUG("done.\n");
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Freelist management
|
||||
*/
|
||||
|
@ -307,8 +302,7 @@ static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
|
|||
int i;
|
||||
DRM_DEBUG("count=%d\n", dma->buf_count);
|
||||
|
||||
dev_priv->head = drm_alloc( sizeof(drm_mga_freelist_t),
|
||||
DRM_MEM_DRIVER );
|
||||
dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv->head == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
|
||||
|
@ -319,8 +313,7 @@ static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
|
|||
buf = dma->buflist[i];
|
||||
buf_priv = buf->dev_private;
|
||||
|
||||
entry = drm_alloc( sizeof(drm_mga_freelist_t),
|
||||
DRM_MEM_DRIVER );
|
||||
entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
|
||||
if (entry == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
|
||||
|
@ -376,8 +369,7 @@ static void mga_freelist_reset( drm_device_t *dev )
|
|||
for (i = 0; i < dma->buf_count; i++) {
|
||||
buf = dma->buflist[i];
|
||||
buf_priv = buf->dev_private;
|
||||
SET_AGE( &buf_priv->list_entry->age,
|
||||
MGA_BUFFER_FREE, 0 );
|
||||
SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -423,8 +415,7 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
|
|||
|
||||
DRM_DEBUG("age=0x%06lx wrap=%d\n",
|
||||
buf_priv->list_entry->age.head -
|
||||
dev_priv->primary->offset,
|
||||
buf_priv->list_entry->age.wrap );
|
||||
dev_priv->primary->offset, buf_priv->list_entry->age.wrap);
|
||||
|
||||
entry = buf_priv->list_entry;
|
||||
head = dev_priv->head;
|
||||
|
@ -446,7 +437,6 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* DMA initialization, cleanup
|
||||
*/
|
||||
|
@ -549,8 +539,7 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
drm_core_ioremap(dev->agp_buffer_map, dev);
|
||||
|
||||
if (!dev_priv->warp->handle ||
|
||||
!dev_priv->primary->handle ||
|
||||
!dev->agp_buffer_map->handle ) {
|
||||
!dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
|
||||
DRM_ERROR("failed to ioremap agp regions!\n");
|
||||
/* Assign dev_private so we can do cleanup. */
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -582,12 +571,9 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
|
|||
|
||||
/* Init the primary DMA registers.
|
||||
*/
|
||||
MGA_WRITE( MGA_PRIMADDRESS,
|
||||
dev_priv->primary->offset | MGA_DMA_GENERAL );
|
||||
MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
|
||||
#if 0
|
||||
MGA_WRITE( MGA_PRIMPTR,
|
||||
virt_to_bus((void *)dev_priv->prim.status) |
|
||||
MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
|
||||
MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
|
||||
MGA_PRIMPTREN1); /* DWGSYNC */
|
||||
#endif
|
||||
|
||||
|
@ -633,7 +619,8 @@ int mga_do_cleanup_dma( drm_device_t *dev )
|
|||
* may not have been called from userspace and after dev_private
|
||||
* is freed, it's too late.
|
||||
*/
|
||||
if ( dev->irq_enabled ) drm_irq_uninstall(dev);
|
||||
if (dev->irq_enabled)
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
if (dev->dev_private) {
|
||||
drm_mga_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -666,7 +653,8 @@ int mga_dma_init( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) );
|
||||
DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
|
||||
sizeof(init));
|
||||
|
||||
switch (init.func) {
|
||||
case MGA_INIT_DMA:
|
||||
|
@ -678,7 +666,6 @@ int mga_dma_init( DRM_IOCTL_ARGS )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Primary DMA stream management
|
||||
*/
|
||||
|
@ -691,7 +678,8 @@ int mga_dma_flush( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t __user *)data, sizeof(lock) );
|
||||
DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t __user *) data,
|
||||
sizeof(lock));
|
||||
|
||||
DRM_DEBUG("%s%s%s\n",
|
||||
(lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
|
||||
|
@@ -728,20 +716,19 @@ int mga_dma_reset( DRM_IOCTL_ARGS )
return mga_do_dma_reset(dev_priv);
}

/* ================================================================
* DMA buffer management
*/

static int mga_dma_get_buffers( DRMFILE filp,
drm_device_t *dev, drm_dma_t *d )
static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
{
drm_buf_t *buf;
int i;

for (i = d->granted_count; i < d->request_count; i++) {
buf = mga_freelist_get(dev);
if ( !buf ) return DRM_ERR(EAGAIN);
if (!buf)
return DRM_ERR(EAGAIN);

buf->filp = filp;
@ -74,7 +74,6 @@
|
|||
#define MGA_CARD_TYPE_G200 1
|
||||
#define MGA_CARD_TYPE_G400 2
|
||||
|
||||
|
||||
#define MGA_FRONT 0x1
|
||||
#define MGA_BACK 0x2
|
||||
#define MGA_DEPTH 0x4
|
||||
|
@ -121,7 +120,6 @@
|
|||
|
||||
#endif /* __MGA_SAREA_DEFINES__ */
|
||||
|
||||
|
||||
/* Setup registers for 3D context
|
||||
*/
|
||||
typedef struct {
|
||||
|
@ -224,7 +222,6 @@ typedef struct _drm_mga_sarea {
|
|||
int ctxOwner;
|
||||
} drm_mga_sarea_t;
|
||||
|
||||
|
||||
/* WARNING: If you change any of these defines, make sure to change the
|
||||
* defines in the Xserver file (xf86drmMga.h)
|
||||
*/
|
||||
|
|
|
@ -187,8 +187,6 @@ static inline u32 _MGA_READ(u32 *addr)
|
|||
#define DMAREG1(r) (u8)(((r - DWGREG1) >> 2) | 0x80)
|
||||
#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
|
||||
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Helper macross...
|
||||
*/
|
||||
|
@ -230,7 +228,6 @@ do { \
|
|||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Primary DMA command stream
|
||||
*/
|
||||
|
@ -315,7 +312,6 @@ do { \
|
|||
write += DMA_BLOCK_SIZE; \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* Buffer aging via primary DMA stream head pointer.
|
||||
*/
|
||||
|
||||
|
@ -342,7 +338,6 @@ do { \
|
|||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \
|
||||
MGA_DWGENGSTS | \
|
||||
MGA_ENDPRDMASTS)
|
||||
|
@ -351,8 +346,6 @@ do { \
|
|||
|
||||
#define MGA_DMA_DEBUG 0
|
||||
|
||||
|
||||
|
||||
/* A reduced set of the mga registers.
|
||||
*/
|
||||
#define MGA_CRTC_INDEX 0x1fd4
|
||||
|
@ -607,7 +600,6 @@ do { \
|
|||
# define MGA_G400_WR_MAGIC (1 << 6)
|
||||
# define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */
|
||||
|
||||
|
||||
#define MGA_ILOAD_ALIGN 64
|
||||
#define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1)
|
||||
|
||||
|
|
|
@ -38,8 +38,7 @@
|
|||
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *) arg;
|
||||
drm_mga_private_t *dev_priv =
|
||||
(drm_mga_private_t *)dev->dev_private;
|
||||
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
|
||||
int status;
|
||||
|
||||
status = MGA_READ(MGA_STATUS);
|
||||
|
@@ -73,9 +72,9 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
return ret;
}

void mga_driver_irq_preinstall( drm_device_t *dev ) {
drm_mga_private_t *dev_priv =
(drm_mga_private_t *)dev->dev_private;
void mga_driver_irq_preinstall(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

/* Disable *all* interrupts */
MGA_WRITE(MGA_IEN, 0);

@@ -83,17 +82,17 @@ void mga_driver_irq_preinstall( drm_device_t *dev ) {
MGA_WRITE(MGA_ICLEAR, ~0);
}

void mga_driver_irq_postinstall( drm_device_t *dev ) {
drm_mga_private_t *dev_priv =
(drm_mga_private_t *)dev->dev_private;
void mga_driver_irq_postinstall(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

/* Turn on VBL interrupt */
MGA_WRITE(MGA_IEN, MGA_VLINEIEN);
}

void mga_driver_irq_uninstall( drm_device_t *dev ) {
drm_mga_private_t *dev_priv =
(drm_mga_private_t *)dev->dev_private;
void mga_driver_irq_uninstall(drm_device_t * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
return;
@ -37,7 +37,6 @@
|
|||
#include "mga_drm.h"
|
||||
#include "mga_drv.h"
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* DMA hardware state programming functions
|
||||
*/
|
||||
|
@ -62,8 +61,7 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
|
|||
}
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_CXBNDRY, (box->x2 << 16) | box->x1,
|
||||
MGA_YTOP, box->y1 * pitch,
|
||||
MGA_YBOT, box->y2 * pitch );
|
||||
MGA_YTOP, box->y1 * pitch, MGA_YBOT, box->y2 * pitch);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -78,18 +76,15 @@ static __inline__ void mga_g200_emit_context( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
|
||||
MGA_MACCESS, ctx->maccess,
|
||||
MGA_PLNWT, ctx->plnwt,
|
||||
MGA_DWGCTL, ctx->dwgctl );
|
||||
MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
|
||||
|
||||
DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
|
||||
MGA_FOGCOL, ctx->fogcolor,
|
||||
MGA_WFLAG, ctx->wflag,
|
||||
MGA_ZORG, dev_priv->depth_offset );
|
||||
MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
|
||||
|
||||
DMA_BLOCK(MGA_FCOL, ctx->fcol,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -104,23 +99,19 @@ static __inline__ void mga_g400_emit_context( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
|
||||
MGA_MACCESS, ctx->maccess,
|
||||
MGA_PLNWT, ctx->plnwt,
|
||||
MGA_DWGCTL, ctx->dwgctl );
|
||||
MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
|
||||
|
||||
DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
|
||||
MGA_FOGCOL, ctx->fogcolor,
|
||||
MGA_WFLAG, ctx->wflag,
|
||||
MGA_ZORG, dev_priv->depth_offset );
|
||||
MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
|
||||
|
||||
DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
|
||||
MGA_TDUALSTAGE0, ctx->tdualstage0,
|
||||
MGA_TDUALSTAGE1, ctx->tdualstage1,
|
||||
MGA_FCOL, ctx->fcol );
|
||||
MGA_TDUALSTAGE1, ctx->tdualstage1, MGA_FCOL, ctx->fcol);
|
||||
|
||||
DMA_BLOCK(MGA_STENCIL, ctx->stencil,
|
||||
MGA_STENCILCTL, ctx->stencilctl,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -140,18 +131,15 @@ static __inline__ void mga_g200_emit_tex0( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_TEXORG, tex->texorg,
|
||||
MGA_TEXORG1, tex->texorg1,
|
||||
MGA_TEXORG2, tex->texorg2,
|
||||
MGA_TEXORG3, tex->texorg3 );
|
||||
MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
|
||||
|
||||
DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
|
||||
MGA_TEXWIDTH, tex->texwidth,
|
||||
MGA_TEXHEIGHT, tex->texheight,
|
||||
MGA_WR24, tex->texwidth );
|
||||
MGA_TEXHEIGHT, tex->texheight, MGA_WR24, tex->texwidth);
|
||||
|
||||
DMA_BLOCK(MGA_WR34, tex->texheight,
|
||||
MGA_TEXTRANS, 0x0000ffff,
|
||||
MGA_TEXTRANSHIGH, 0x0000ffff,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_TEXTRANSHIGH, 0x0000ffff, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -174,18 +162,15 @@ static __inline__ void mga_g400_emit_tex0( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_TEXORG, tex->texorg,
|
||||
MGA_TEXORG1, tex->texorg1,
|
||||
MGA_TEXORG2, tex->texorg2,
|
||||
MGA_TEXORG3, tex->texorg3 );
|
||||
MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
|
||||
|
||||
DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
|
||||
MGA_TEXWIDTH, tex->texwidth,
|
||||
MGA_TEXHEIGHT, tex->texheight,
|
||||
MGA_WR49, 0x00000000 );
|
||||
MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);
|
||||
|
||||
DMA_BLOCK(MGA_WR57, 0x00000000,
|
||||
MGA_WR53, 0x00000000,
|
||||
MGA_WR61, 0x00000000,
|
||||
MGA_WR52, MGA_G400_WR_MAGIC );
|
||||
MGA_WR61, 0x00000000, MGA_WR52, MGA_G400_WR_MAGIC);
|
||||
|
||||
DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
|
||||
MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
|
||||
|
@ -194,8 +179,7 @@ static __inline__ void mga_g400_emit_tex0( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_TEXTRANS, 0x0000ffff,
|
||||
MGA_TEXTRANSHIGH, 0x0000ffff );
|
||||
MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -220,13 +204,11 @@ static __inline__ void mga_g400_emit_tex1( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_TEXORG, tex->texorg,
|
||||
MGA_TEXORG1, tex->texorg1,
|
||||
MGA_TEXORG2, tex->texorg2,
|
||||
MGA_TEXORG3, tex->texorg3 );
|
||||
MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
|
||||
|
||||
DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
|
||||
MGA_TEXWIDTH, tex->texwidth,
|
||||
MGA_TEXHEIGHT, tex->texheight,
|
||||
MGA_WR49, 0x00000000 );
|
||||
MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);
|
||||
|
||||
DMA_BLOCK(MGA_WR57, 0x00000000,
|
||||
MGA_WR53, 0x00000000,
|
||||
|
@ -251,13 +233,11 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
|
||||
MGA_WVRTXSZ, 0x00000007,
|
||||
MGA_WFLAG, 0x00000000,
|
||||
MGA_WR24, 0x00000000 );
|
||||
MGA_WFLAG, 0x00000000, MGA_WR24, 0x00000000);
|
||||
|
||||
DMA_BLOCK(MGA_WR25, 0x00000100,
|
||||
MGA_WR34, 0x00000000,
|
||||
MGA_WR42, 0x0000ffff,
|
||||
MGA_WR60, 0x0000ffff );
|
||||
MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff);
|
||||
|
||||
/* Padding required to to hardware bug.
|
||||
*/
|
||||
|
@ -265,8 +245,7 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv )
|
|||
MGA_DMAPAD, 0xffffffff,
|
||||
MGA_DMAPAD, 0xffffffff,
|
||||
MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
|
||||
MGA_WMODE_START |
|
||||
MGA_WAGP_ENABLE) );
|
||||
MGA_WMODE_START | MGA_WAGP_ENABLE));
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -283,14 +262,12 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
if (pipe & MGA_T2) {
|
||||
DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
|
||||
MGA_WACCEPTSEQ, 0x00000000,
|
||||
|
@ -318,8 +295,7 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
|
||||
MGA_WACCEPTSEQ, 0x00000000,
|
||||
|
@ -329,8 +305,7 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
|
|||
|
||||
DMA_BLOCK(MGA_WFLAG, 0x00000000,
|
||||
MGA_WFLAG1, 0x00000000,
|
||||
MGA_WR56, MGA_G400_WR56_MAGIC,
|
||||
MGA_DMAPAD, 0x00000000 );
|
||||
MGA_WR56, MGA_G400_WR56_MAGIC, MGA_DMAPAD, 0x00000000);
|
||||
|
||||
DMA_BLOCK(MGA_WR49, 0x00000000, /* tex0 */
|
||||
MGA_WR57, 0x00000000, /* tex0 */
|
||||
|
@ -347,8 +322,7 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
|
|||
MGA_DMAPAD, 0xffffffff,
|
||||
MGA_DMAPAD, 0xffffffff,
|
||||
MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
|
||||
MGA_WMODE_START |
|
||||
MGA_WAGP_ENABLE) );
|
||||
MGA_WMODE_START | MGA_WAGP_ENABLE));
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -401,7 +375,6 @@ static void mga_g400_emit_state( drm_mga_private_t *dev_priv )
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* SAREA state verification
|
||||
*/
|
||||
|
@ -436,8 +409,7 @@ static int mga_verify_tex( drm_mga_private_t *dev_priv, int unit )
|
|||
org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);
|
||||
|
||||
if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
|
||||
DRM_ERROR( "*** bad TEXORG: 0x%x, unit %d\n",
|
||||
tex->texorg, unit );
|
||||
DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
|
||||
tex->texorg = 0;
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -498,20 +470,17 @@ static int mga_verify_blit( drm_mga_private_t *dev_priv,
|
|||
{
|
||||
if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
|
||||
(dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
|
||||
DRM_ERROR( "*** bad blit: src=0x%x dst=0x%x\n",
|
||||
srcorg, dstorg );
|
||||
DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
*
|
||||
*/
|
||||
|
||||
static void mga_dma_dispatch_clear( drm_device_t *dev,
|
||||
drm_mga_clear_t *clear )
|
||||
static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
|
||||
{
|
||||
drm_mga_private_t *dev_priv = dev->dev_private;
|
||||
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
|
||||
|
@ -526,8 +495,7 @@ static void mga_dma_dispatch_clear( drm_device_t *dev,
|
|||
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DWGSYNC, 0x00007100,
|
||||
MGA_DWGSYNC, 0x00007000 );
|
||||
MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
|
||||
|
||||
ADVANCE_DMA();
|
||||
|
||||
|
@ -549,13 +517,11 @@ static void mga_dma_dispatch_clear( drm_device_t *dev,
|
|||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_FCOL, clear->clear_color,
|
||||
MGA_DSTORG, dev_priv->front_offset,
|
||||
MGA_DWGCTL + MGA_EXEC,
|
||||
dev_priv->clear_cmd );
|
||||
MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
||||
|
||||
if (clear->flags & MGA_BACK) {
|
||||
BEGIN_DMA(2);
|
||||
|
||||
|
@ -567,8 +533,7 @@ static void mga_dma_dispatch_clear( drm_device_t *dev,
|
|||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_FCOL, clear->clear_color,
|
||||
MGA_DSTORG, dev_priv->back_offset,
|
||||
MGA_DWGCTL + MGA_EXEC,
|
||||
dev_priv->clear_cmd );
|
||||
MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -584,8 +549,7 @@ static void mga_dma_dispatch_clear( drm_device_t *dev,
|
|||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_FCOL, clear->clear_depth,
|
||||
MGA_DSTORG, dev_priv->depth_offset,
|
||||
MGA_DWGCTL + MGA_EXEC,
|
||||
dev_priv->clear_cmd );
|
||||
MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
@ -597,8 +561,7 @@ static void mga_dma_dispatch_clear( drm_device_t *dev,
|
|||
/* Force reset of DWGCTL */
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_PLNWT, ctx->plnwt,
|
||||
MGA_DWGCTL, ctx->dwgctl );
|
||||
MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
|
||||
|
||||
ADVANCE_DMA();
|
||||
|
||||
|
@ -623,8 +586,7 @@ static void mga_dma_dispatch_swap( drm_device_t *dev )
|
|||
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DWGSYNC, 0x00007100,
|
||||
MGA_DWGSYNC, 0x00007000 );
|
||||
MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
|
||||
|
||||
DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset,
|
||||
MGA_MACCESS, dev_priv->maccess,
|
||||
|
@ -633,8 +595,7 @@ static void mga_dma_dispatch_swap( drm_device_t *dev )
|
|||
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_PLNWT, 0xffffffff,
|
||||
MGA_DWGCTL, MGA_DWGCTL_COPY );
|
||||
MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY);
|
||||
|
||||
for (i = 0; i < nbox; i++) {
|
||||
drm_clip_rect_t *box = &pbox[i];
|
||||
|
@ -647,14 +608,12 @@ static void mga_dma_dispatch_swap( drm_device_t *dev )
|
|||
DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
|
||||
MGA_AR3, start + box->x1,
|
||||
MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
|
||||
MGA_YDSTLEN + MGA_EXEC,
|
||||
(box->y1 << 16) | height );
|
||||
MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
|
||||
}
|
||||
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_PLNWT, ctx->plnwt,
|
||||
MGA_SRCORG, dev_priv->front_offset,
|
||||
MGA_DWGCTL, ctx->dwgctl );
|
||||
MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl);
|
||||
|
||||
ADVANCE_DMA();
|
||||
|
||||
|
@@ -776,28 +735,22 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf,

DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DWGSYNC, 0x00007100,
MGA_DWGSYNC, 0x00007000 );
MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

DMA_BLOCK(MGA_DSTORG, dstorg,
MGA_MACCESS, 0x00000000,
MGA_SRCORG, srcorg,
MGA_AR5, 64 );
MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64);

DMA_BLOCK(MGA_PITCH, 64,
MGA_PLNWT, 0xffffffff,
MGA_DMAPAD, 0x00000000,
MGA_DWGCTL, MGA_DWGCTL_COPY );
MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY);

DMA_BLOCK(MGA_AR0, 63,
MGA_AR3, 0,
MGA_FXBNDRY, (63 << 16) | 0,
MGA_YDSTLEN + MGA_EXEC, y2 );
MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2);

DMA_BLOCK(MGA_PLNWT, ctx->plnwt,
MGA_SRCORG, dev_priv->front_offset,
MGA_PITCH, dev_priv->front_pitch,
MGA_DWGSYNC, 0x00007000 );
MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000);

ADVANCE_DMA();
@ -812,8 +765,7 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf,
|
|||
FLUSH_DMA();
|
||||
}
|
||||
|
||||
static void mga_dma_dispatch_blit( drm_device_t *dev,
|
||||
drm_mga_blit_t *blit )
|
||||
static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit)
|
||||
{
|
||||
drm_mga_private_t *dev_priv = dev->dev_private;
|
||||
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
|
||||
|
@ -828,13 +780,11 @@ static void mga_dma_dispatch_blit( drm_device_t *dev,
|
|||
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_DMAPAD, 0x00000000,
|
||||
MGA_DWGSYNC, 0x00007100,
|
||||
MGA_DWGSYNC, 0x00007000 );
|
||||
MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
|
||||
|
||||
DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY,
|
||||
MGA_PLNWT, blit->planemask,
|
||||
MGA_SRCORG, blit->srcorg,
|
||||
MGA_DSTORG, blit->dstorg );
|
||||
MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg);
|
||||
|
||||
DMA_BLOCK(MGA_SGN, scandir,
|
||||
MGA_MACCESS, dev_priv->maccess,
|
||||
|
@ -868,13 +818,11 @@ static void mga_dma_dispatch_blit( drm_device_t *dev,
|
|||
/* Force reset of DWGCTL */
|
||||
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
|
||||
MGA_PLNWT, ctx->plnwt,
|
||||
MGA_PITCH, dev_priv->front_pitch,
|
||||
MGA_DWGCTL, ctx->dwgctl );
|
||||
MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl);
|
||||
|
||||
ADVANCE_DMA();
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
*
|
||||
*/
|
||||
|
@ -888,7 +836,8 @@ int mga_dma_clear( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t __user *)data, sizeof(clear) );
|
||||
DRM_COPY_FROM_USER_IOCTL(clear, (drm_mga_clear_t __user *) data,
|
||||
sizeof(clear));
|
||||
|
||||
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
|
||||
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
|
||||
|
@ -941,7 +890,8 @@ int mga_dma_vertex( DRM_IOCTL_ARGS )
|
|||
(drm_mga_vertex_t __user *) data,
|
||||
sizeof(vertex));
|
||||
|
||||
if(vertex.idx < 0 || vertex.idx > dma->buf_count) return DRM_ERR(EINVAL);
|
||||
if (vertex.idx < 0 || vertex.idx > dma->buf_count)
|
||||
return DRM_ERR(EINVAL);
|
||||
buf = dma->buflist[vertex.idx];
|
||||
buf_priv = buf->dev_private;
|
||||
|
||||
|
@ -980,7 +930,8 @@ int mga_dma_indices( DRM_IOCTL_ARGS )
|
|||
(drm_mga_indices_t __user *) data,
|
||||
sizeof(indices));
|
||||
|
||||
if(indices.idx < 0 || indices.idx > dma->buf_count) return DRM_ERR(EINVAL);
|
||||
if (indices.idx < 0 || indices.idx > dma->buf_count)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
buf = dma->buflist[indices.idx];
|
||||
buf_priv = buf->dev_private;
|
||||
|
@ -1016,7 +967,8 @@ int mga_dma_iload( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t __user *)data, sizeof(iload) );
|
||||
DRM_COPY_FROM_USER_IOCTL(iload, (drm_mga_iload_t __user *) data,
|
||||
sizeof(iload));
|
||||
|
||||
#if 0
|
||||
if (mga_do_wait_for_idle(dev_priv) < 0) {
|
||||
|
@ -1025,7 +977,8 @@ int mga_dma_iload( DRM_IOCTL_ARGS )
|
|||
return DRM_ERR(EBUSY);
|
||||
}
|
||||
#endif
|
||||
if(iload.idx < 0 || iload.idx > dma->buf_count) return DRM_ERR(EINVAL);
|
||||
if (iload.idx < 0 || iload.idx > dma->buf_count)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
buf = dma->buflist[iload.idx];
|
||||
buf_priv = buf->dev_private;
|
||||
|
@ -1056,7 +1009,8 @@ int mga_dma_blit( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t __user *)data, sizeof(blit) );
|
||||
DRM_COPY_FROM_USER_IOCTL(blit, (drm_mga_blit_t __user *) data,
|
||||
sizeof(blit));
|
||||
|
||||
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
|
||||
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
|
||||
|
|
|
@ -33,7 +33,6 @@
|
|||
#include "mga_drv.h"
|
||||
#include "mga_ucode.h"
|
||||
|
||||
|
||||
#define MGA_WARP_CODE_ALIGN 256 /* in bytes */
|
||||
|
||||
#define WARP_UCODE_SIZE( which ) \
|
||||
|
@ -48,7 +47,6 @@ do { \
|
|||
vcbase += WARP_UCODE_SIZE( which ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
static unsigned int mga_warp_g400_microcode_size(drm_mga_private_t * dev_priv)
|
||||
{
|
||||
unsigned int size;
|
||||
|
@ -108,8 +106,7 @@ static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv )
|
|||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
|
||||
memset( dev_priv->warp_pipe_phys, 0,
|
||||
sizeof(dev_priv->warp_pipe_phys) );
|
||||
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
|
||||
|
||||
WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ);
|
||||
WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF);
|
||||
|
@ -145,8 +142,7 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
|
|||
return DRM_ERR(ENOMEM);
|
||||
}
|
||||
|
||||
memset( dev_priv->warp_pipe_phys, 0,
|
||||
sizeof(dev_priv->warp_pipe_phys) );
|
||||
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
|
||||
|
||||
WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
|
||||
WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
|
||||
|
@ -197,8 +193,7 @@ int mga_warp_init( drm_mga_private_t *dev_priv )
|
|||
}
|
||||
|
||||
MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
|
||||
MGA_WMASTER_ENABLE |
|
||||
MGA_WCACHEFLUSH_ENABLE) );
|
||||
MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
|
||||
wmisc = MGA_READ(MGA_WMISC);
|
||||
if (wmisc != WMISC_EXPECTED) {
|
||||
DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
|
||||
|
|
|
@ -106,7 +106,6 @@ static void r128_status( drm_r128_private_t *dev_priv )
|
|||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Engine, FIFO control
|
||||
*/
|
||||
|
@ -138,7 +137,8 @@ static int r128_do_wait_for_fifo( drm_r128_private_t *dev_priv, int entries )
|
|||
|
||||
for (i = 0; i < dev_priv->usec_timeout; i++) {
|
||||
int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
|
||||
if ( slots >= entries ) return 0;
|
||||
if (slots >= entries)
|
||||
return 0;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
||||
|
@ -153,7 +153,8 @@ static int r128_do_wait_for_idle( drm_r128_private_t *dev_priv )
|
|||
int i, ret;
|
||||
|
||||
ret = r128_do_wait_for_fifo(dev_priv, 64);
|
||||
if ( ret ) return ret;
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < dev_priv->usec_timeout; i++) {
|
||||
if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
|
||||
|
@ -169,7 +170,6 @@ static int r128_do_wait_for_idle( drm_r128_private_t *dev_priv )
|
|||
return DRM_ERR(EBUSY);
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* CCE control, initialization
|
||||
*/
|
||||
|
@ -185,8 +185,7 @@ static void r128_cce_load_microcode( drm_r128_private_t *dev_priv )
|
|||
|
||||
R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
|
||||
for (i = 0; i < 256; i++) {
|
||||
R128_WRITE( R128_PM4_MICROCODE_DATAH,
|
||||
r128_cce_microcode[i * 2] );
|
||||
R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]);
|
||||
R128_WRITE(R128_PM4_MICROCODE_DATAL,
|
||||
r128_cce_microcode[i * 2 + 1]);
|
||||
}
|
||||
|
@ -287,11 +286,9 @@ static int r128_do_engine_reset( drm_device_t *dev )
|
|||
gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
|
||||
|
||||
/* Taken from the sample code - do not change */
|
||||
R128_WRITE( R128_GEN_RESET_CNTL,
|
||||
gen_reset_cntl | R128_SOFT_RESET_GUI );
|
||||
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
|
||||
R128_READ(R128_GEN_RESET_CNTL);
|
||||
R128_WRITE( R128_GEN_RESET_CNTL,
|
||||
gen_reset_cntl & ~R128_SOFT_RESET_GUI );
|
||||
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
|
||||
R128_READ(R128_GEN_RESET_CNTL);
|
||||
|
||||
R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
|
||||
|
@ -496,7 +493,8 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
}
|
||||
|
||||
if (!dev_priv->is_pci) {
|
||||
dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset);
|
||||
dev_priv->agp_textures =
|
||||
drm_core_findmap(dev, init->agp_textures_offset);
|
||||
if (!dev_priv->agp_textures) {
|
||||
DRM_ERROR("could not find agp texture region!\n");
|
||||
dev->dev_private = (void *)dev_priv;
|
||||
|
@ -525,11 +523,11 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
} else
|
||||
#endif
|
||||
{
|
||||
dev_priv->cce_ring->handle =
|
||||
(void *)dev_priv->cce_ring->offset;
|
||||
dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset;
|
||||
dev_priv->ring_rptr->handle =
|
||||
(void *)dev_priv->ring_rptr->offset;
|
||||
dev->agp_buffer_map->handle = (void *)dev->agp_buffer_map->offset;
|
||||
dev->agp_buffer_map->handle =
|
||||
(void *)dev->agp_buffer_map->offset;
|
||||
}
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
|
@ -545,8 +543,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
dev_priv->ring.size = init->ring_size;
|
||||
dev_priv->ring.size_l2qw = get_order(init->ring_size / 8);
|
||||
|
||||
dev_priv->ring.tail_mask =
|
||||
(dev_priv->ring.size / sizeof(u32)) - 1;
|
||||
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
|
||||
|
||||
dev_priv->ring.high_mark = 128;
|
||||
|
||||
|
@ -554,8 +551,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
|
|||
R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
|
||||
|
||||
dev_priv->sarea_priv->last_dispatch = 0;
|
||||
R128_WRITE( R128_LAST_DISPATCH_REG,
|
||||
dev_priv->sarea_priv->last_dispatch );
|
||||
R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (dev_priv->is_pci) {
|
||||
|
@ -589,7 +585,8 @@ int r128_do_cleanup_cce( drm_device_t *dev )
|
|||
* may not have been called from userspace and after dev_private
|
||||
* is freed, it's too late.
|
||||
*/
|
||||
if ( dev->irq_enabled ) drm_irq_uninstall(dev);
|
||||
if (dev->irq_enabled)
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
if (dev->dev_private) {
|
||||
drm_r128_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -630,7 +627,8 @@ int r128_cce_init( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( init, (drm_r128_init_t __user *)data, sizeof(init) );
|
||||
DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data,
|
||||
sizeof(init));
|
||||
|
||||
switch (init.func) {
|
||||
case R128_INIT_CCE:
|
||||
|
@ -673,7 +671,8 @@ int r128_cce_stop( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *)data, sizeof(stop) );
|
||||
DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data,
|
||||
sizeof(stop));
|
||||
|
||||
/* Flush any pending CCE commands. This ensures any outstanding
|
||||
* commands are exectuted by the engine before we turn it off.
|
||||
|
@ -687,7 +686,8 @@ int r128_cce_stop( DRM_IOCTL_ARGS )
|
|||
*/
|
||||
if (stop.idle) {
|
||||
ret = r128_do_cce_idle(dev_priv);
|
||||
if ( ret ) return ret;
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Finally, we can turn off the CCE. If the engine isn't idle,
|
||||
|
@ -755,7 +755,6 @@ int r128_fullscreen( DRM_IOCTL_ARGS )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Freelist management
|
||||
*/
|
||||
|
@ -772,8 +771,7 @@ static int r128_freelist_init( drm_device_t *dev )
|
|||
drm_r128_freelist_t *entry;
|
||||
int i;
|
||||
|
||||
dev_priv->head = drm_alloc( sizeof(drm_r128_freelist_t),
|
||||
DRM_MEM_DRIVER );
|
||||
dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
|
||||
if (dev_priv->head == NULL)
|
||||
return DRM_ERR(ENOMEM);
|
||||
|
||||
|
@ -784,9 +782,9 @@ static int r128_freelist_init( drm_device_t *dev )
|
|||
buf = dma->buflist[i];
|
||||
buf_priv = buf->dev_private;
|
||||
|
||||
entry = drm_alloc( sizeof(drm_r128_freelist_t),
|
||||
DRM_MEM_DRIVER );
|
||||
if ( !entry ) return DRM_ERR(ENOMEM);
|
||||
entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
|
||||
if (!entry)
|
||||
return DRM_ERR(ENOMEM);
|
||||
|
||||
entry->age = R128_BUFFER_FREE;
|
||||
entry->buf = buf;
|
||||
|
@ -860,7 +858,6 @@ void r128_freelist_reset( drm_device_t *dev )
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* CCE command submission
|
||||
*/
|
||||
|
@ -889,7 +886,8 @@ static int r128_cce_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
|
|||
|
||||
for (i = d->granted_count; i < d->request_count; i++) {
|
||||
buf = r128_freelist_get(dev);
|
||||
if ( !buf ) return DRM_ERR(EAGAIN);
|
||||
if (!buf)
|
||||
return DRM_ERR(EAGAIN);
|
||||
|
||||
buf->filp = filp;
|
||||
|
||||
|
|
|
@ -140,7 +140,6 @@ typedef struct {
|
|||
unsigned int tex_border_color;
|
||||
} drm_r128_texture_regs_t;
|
||||
|
||||
|
||||
typedef struct drm_r128_sarea {
|
||||
/* The channel for communication of state information to the kernel
|
||||
* on firing a vertex buffer.
|
||||
|
@ -168,7 +167,6 @@ typedef struct drm_r128_sarea {
|
|||
int pfCurrentPage; /* which buffer is being displayed? */
|
||||
} drm_r128_sarea_t;
|
||||
|
||||
|
||||
/* WARNING: If you change any of these defines, make sure to change the
|
||||
* defines in the Xserver file (xf86drmR128.h)
|
||||
*/
|
||||
|
|
|
@ -46,7 +46,6 @@
|
|||
#define DRIVER_MINOR 5
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
|
||||
#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR )
|
||||
|
||||
typedef struct drm_r128_freelist {
|
||||
|
@ -271,7 +270,6 @@ extern void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp);
|
|||
# define R128_EVENT_CRTC_OFFSET (1 << 0)
|
||||
#define R128_WINDOW_XY_OFFSET 0x1bcc
|
||||
|
||||
|
||||
/* CCE registers
|
||||
*/
|
||||
#define R128_PM4_BUFFER_OFFSET 0x0700
|
||||
|
@ -322,7 +320,6 @@ extern void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp);
|
|||
#define R128_PM4_FIFO_DATA_EVEN 0x1000
|
||||
#define R128_PM4_FIFO_DATA_ODD 0x1004
|
||||
|
||||
|
||||
/* CCE command packets
|
||||
*/
|
||||
#define R128_CCE_PACKET0 0x00000000
|
||||
|
@ -402,7 +399,6 @@ do { \
|
|||
|
||||
extern int R128_READ_PLL(drm_device_t * dev, int addr);
|
||||
|
||||
|
||||
#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \
|
||||
((n) << 16) | ((reg) >> 2))
|
||||
#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \
|
||||
|
@ -411,9 +407,7 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr);
|
|||
#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \
|
||||
(pkt) | ((n) << 16))
|
||||
|
||||
|
||||
static __inline__ void
|
||||
r128_update_ring_snapshot( drm_r128_private_t *dev_priv )
|
||||
static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
|
||||
{
|
||||
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
|
||||
ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
|
||||
|
@ -458,7 +452,6 @@ do { \
|
|||
OUT_RING( R128_EVENT_CRTC_OFFSET ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Ring control
|
||||
*/
|
||||
|
|
|
@ -38,8 +38,7 @@
|
|||
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *) arg;
|
||||
drm_r128_private_t *dev_priv =
|
||||
(drm_r128_private_t *)dev->dev_private;
|
||||
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
|
||||
int status;
|
||||
|
||||
status = R128_READ(R128_GEN_INT_STATUS);
|
||||
|
@@ -73,9 +72,9 @@ int r128_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
return ret;
}

void r128_driver_irq_preinstall( drm_device_t *dev ) {
drm_r128_private_t *dev_priv =
(drm_r128_private_t *)dev->dev_private;
void r128_driver_irq_preinstall(drm_device_t * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;

/* Disable *all* interrupts */
R128_WRITE(R128_GEN_INT_CNTL, 0);

@@ -83,17 +82,17 @@ void r128_driver_irq_preinstall( drm_device_t *dev ) {
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}

void r128_driver_irq_postinstall( drm_device_t *dev ) {
drm_r128_private_t *dev_priv =
(drm_r128_private_t *)dev->dev_private;
void r128_driver_irq_postinstall(drm_device_t * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;

/* Turn on VBL interrupt */
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
}

void r128_driver_irq_uninstall( drm_device_t *dev ) {
drm_r128_private_t *dev_priv =
(drm_r128_private_t *)dev->dev_private;
void r128_driver_irq_uninstall(drm_device_t * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
return;
@ -32,7 +32,6 @@
|
|||
#include "r128_drm.h"
|
||||
#include "r128_drv.h"
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* CCE hardware state programming functions
|
||||
*/
|
||||
|
@ -208,8 +207,7 @@ static __inline__ void r128_emit_tex1( drm_r128_private_t *dev_priv )
|
|||
|
||||
BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
|
||||
|
||||
OUT_RING( CCE_PACKET0( R128_SEC_TEX_CNTL_C,
|
||||
1 + R128_MAX_TEXTURE_LEVELS ) );
|
||||
OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
|
||||
OUT_RING(tex->tex_cntl);
|
||||
OUT_RING(tex->tex_combine_cntl);
|
||||
for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
|
||||
|
@ -270,15 +268,13 @@ static __inline__ void r128_emit_state( drm_r128_private_t *dev_priv )
|
|||
sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
|
||||
}
|
||||
|
||||
|
||||
#if R128_PERFORMANCE_BOXES
|
||||
/* ================================================================
|
||||
* Performance monitoring functions
|
||||
*/
|
||||
|
||||
static void r128_clear_box(drm_r128_private_t * dev_priv,
|
||||
int x, int y, int w, int h,
|
||||
int r, int g, int b )
|
||||
int x, int y, int w, int h, int r, int g, int b)
|
||||
{
|
||||
u32 pitch, offset;
|
||||
u32 fb_bpp, color;
|
||||
|
@ -288,8 +284,7 @@ static void r128_clear_box( drm_r128_private_t *dev_priv,
|
|||
case 16:
|
||||
fb_bpp = R128_GMC_DST_16BPP;
|
||||
color = (((r & 0xf8) << 8) |
|
||||
((g & 0xfc) << 3) |
|
||||
((b & 0xf8) >> 3));
|
||||
((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
|
||||
break;
|
||||
case 24:
|
||||
fb_bpp = R128_GMC_DST_24BPP;
|
||||
|
@ -314,8 +309,7 @@ static void r128_clear_box( drm_r128_private_t *dev_priv,
|
|||
fb_bpp |
|
||||
R128_GMC_SRC_DATATYPE_COLOR |
|
||||
R128_ROP3_P |
|
||||
R128_GMC_CLR_CMP_CNTL_DIS |
|
||||
R128_GMC_AUX_CLIP_DIS );
|
||||
R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
|
||||
|
||||
OUT_RING((pitch << 21) | (offset >> 5));
|
||||
OUT_RING(color);
|
||||
|
@ -337,7 +331,6 @@ static void r128_cce_performance_boxes( drm_r128_private_t *dev_priv )
|
|||
|
||||
#endif
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* CCE command dispatch functions
|
||||
*/
|
||||
|
@ -374,8 +367,10 @@ static void r128_cce_dispatch_clear( drm_device_t *dev,
|
|||
unsigned int tmp = flags;
|
||||
|
||||
flags &= ~(R128_FRONT | R128_BACK);
|
||||
if ( tmp & R128_FRONT ) flags |= R128_BACK;
|
||||
if ( tmp & R128_BACK ) flags |= R128_FRONT;
|
||||
if (tmp & R128_FRONT)
|
||||
flags |= R128_BACK;
|
||||
if (tmp & R128_BACK)
|
||||
flags |= R128_FRONT;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbox; i++) {
|
||||
|
@ -449,8 +444,7 @@ static void r128_cce_dispatch_clear( drm_device_t *dev,
|
|||
R128_GMC_SRC_DATATYPE_COLOR |
|
||||
R128_ROP3_P |
|
||||
R128_GMC_CLR_CMP_CNTL_DIS |
|
||||
R128_GMC_AUX_CLIP_DIS |
|
||||
R128_GMC_WR_MSK_DIS );
|
||||
R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
|
||||
|
||||
OUT_RING(dev_priv->depth_pitch_offset_c);
|
||||
OUT_RING(clear->clear_depth);
|
||||
|
@ -496,16 +490,14 @@ static void r128_cce_dispatch_swap( drm_device_t *dev )
|
|||
R128_ROP3_S |
|
||||
R128_DP_SRC_SOURCE_MEMORY |
|
||||
R128_GMC_CLR_CMP_CNTL_DIS |
|
||||
R128_GMC_AUX_CLIP_DIS |
|
||||
R128_GMC_WR_MSK_DIS );
|
||||
R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
|
||||
|
||||
/* Make this work even if front & back are flipped:
|
||||
*/
|
||||
if (dev_priv->current_page == 0) {
|
||||
OUT_RING(dev_priv->back_pitch_offset_c);
|
||||
OUT_RING(dev_priv->front_pitch_offset_c);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
OUT_RING(dev_priv->front_pitch_offset_c);
|
||||
OUT_RING(dev_priv->back_pitch_offset_c);
|
||||
}
|
||||
|
@ -537,8 +529,7 @@ static void r128_cce_dispatch_flip( drm_device_t *dev )
|
|||
RING_LOCALS;
|
||||
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
|
||||
__FUNCTION__,
|
||||
dev_priv->current_page,
|
||||
dev_priv->sarea_priv->pfCurrentPage);
|
||||
dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
|
||||
|
||||
#if R128_PERFORMANCE_BOXES
|
||||
/* Do some trivial performance monitoring...
|
||||
|
@ -575,8 +566,7 @@ static void r128_cce_dispatch_flip( drm_device_t *dev )
|
|||
ADVANCE_RING();
|
||||
}
|
||||
|
||||
static void r128_cce_dispatch_vertex( drm_device_t *dev,
|
||||
drm_buf_t *buf )
|
||||
static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
|
||||
{
|
||||
drm_r128_private_t *dev_priv = dev->dev_private;
|
||||
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
|
||||
|
@ -647,14 +637,12 @@ static void r128_cce_dispatch_vertex( drm_device_t *dev,
|
|||
}
|
||||
|
||||
static void r128_cce_dispatch_indirect(drm_device_t * dev,
|
||||
drm_buf_t *buf,
|
||||
int start, int end )
|
||||
drm_buf_t * buf, int start, int end)
|
||||
{
|
||||
drm_r128_private_t *dev_priv = dev->dev_private;
|
||||
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
|
||||
RING_LOCALS;
|
||||
DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
|
||||
buf->idx, start, end );
|
||||
DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
|
||||
|
||||
if (start != end) {
|
||||
int offset = buf->bus_address + start;
|
||||
|
@ -705,8 +693,7 @@ static void r128_cce_dispatch_indirect( drm_device_t *dev,
|
|||
|
||||
static void r128_cce_dispatch_indices(drm_device_t * dev,
|
||||
drm_buf_t * buf,
|
||||
int start, int end,
|
||||
int count )
|
||||
int start, int end, int count)
|
||||
{
|
||||
drm_r128_private_t *dev_priv = dev->dev_private;
|
||||
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
|
||||
|
@ -789,8 +776,7 @@ static void r128_cce_dispatch_indices( drm_device_t *dev,
|
|||
}
|
||||
|
||||
static int r128_cce_dispatch_blit(DRMFILE filp,
|
||||
drm_device_t *dev,
|
||||
drm_r128_blit_t *blit )
|
||||
drm_device_t * dev, drm_r128_blit_t * blit)
|
||||
{
|
||||
drm_r128_private_t *dev_priv = dev->dev_private;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
|
@ -866,8 +852,7 @@ static int r128_cce_dispatch_blit( DRMFILE filp,
|
|||
R128_ROP3_S |
|
||||
R128_DP_SRC_SOURCE_HOST_DATA |
|
||||
R128_GMC_CLR_CMP_CNTL_DIS |
|
||||
R128_GMC_AUX_CLIP_DIS |
|
||||
R128_GMC_WR_MSK_DIS) );
|
||||
R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
|
||||
|
||||
data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
|
||||
data[3] = cpu_to_le32(0xffffffff);
|
||||
|
@ -894,7 +879,6 @@ static int r128_cce_dispatch_blit( DRMFILE filp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Tiled depth buffer management
|
||||
*
|
||||
|
@ -1149,8 +1133,7 @@ static int r128_cce_dispatch_read_span( drm_device_t *dev,
|
|||
R128_GMC_SRC_DATATYPE_COLOR |
|
||||
R128_ROP3_S |
|
||||
R128_DP_SRC_SOURCE_MEMORY |
|
||||
R128_GMC_CLR_CMP_CNTL_DIS |
|
||||
R128_GMC_WR_MSK_DIS );
|
||||
R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
|
||||
|
||||
OUT_RING(dev_priv->depth_pitch_offset_c);
|
||||
OUT_RING(dev_priv->span_pitch_offset_c);
|
||||
|
@ -1214,8 +1197,7 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
|
|||
R128_GMC_SRC_DATATYPE_COLOR |
|
||||
R128_ROP3_S |
|
||||
R128_DP_SRC_SOURCE_MEMORY |
|
||||
R128_GMC_CLR_CMP_CNTL_DIS |
|
||||
R128_GMC_WR_MSK_DIS );
|
||||
R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
|
||||
|
||||
OUT_RING(dev_priv->depth_pitch_offset_c);
|
||||
OUT_RING(dev_priv->span_pitch_offset_c);
|
||||
|
@ -1233,7 +1215,6 @@ static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Polygon stipple
|
||||
*/
|
||||
|
@ -1255,7 +1236,6 @@ static void r128_cce_dispatch_stipple( drm_device_t *dev, u32 *stipple )
|
|||
ADVANCE_RING();
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* IOCTL functions
|
||||
*/
|
||||
|
@ -1389,8 +1369,7 @@ int r128_cce_vertex( DRM_IOCTL_ARGS )
|
|||
sizeof(vertex));
|
||||
|
||||
DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
|
||||
DRM_CURRENTPID,
|
||||
vertex.idx, vertex.count, vertex.discard );
|
||||
DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);
|
||||
|
||||
if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
|
@ -1457,8 +1436,7 @@ int r128_cce_indices( DRM_IOCTL_ARGS )
|
|||
elts.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
if ( elts.prim < 0 ||
|
||||
elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) {
|
||||
if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
|
||||
DRM_ERROR("buffer prim %d\n", elts.prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1573,8 +1551,7 @@ int r128_cce_stipple( DRM_IOCTL_ARGS )
|
|||
DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
|
||||
sizeof(stipple));
|
||||
|
||||
if ( DRM_COPY_FROM_USER( &mask, stipple.mask,
|
||||
32 * sizeof(u32) ) )
|
||||
if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
|
||||
return DRM_ERR(EFAULT);
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
|
@ -1608,8 +1585,7 @@ int r128_cce_indirect( DRM_IOCTL_ARGS )
|
|||
sizeof(indirect));
|
||||
|
||||
DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
|
||||
indirect.idx, indirect.start,
|
||||
indirect.end, indirect.discard );
|
||||
indirect.idx, indirect.start, indirect.end, indirect.discard);
|
||||
|
||||
if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
|
@ -295,7 +295,6 @@ static u32 R200_cp_microcode[][2] = {
|
|||
{0000000000, 0000000000},
|
||||
};
|
||||
|
||||
|
||||
static u32 radeon_cp_microcode[][2] = {
|
||||
{0x21007000, 0000000000},
|
||||
{0x20007000, 0000000000},
|
||||
|
@ -555,7 +554,6 @@ static u32 radeon_cp_microcode[][2] = {
|
|||
{0000000000, 0000000000},
|
||||
};
|
||||
|
||||
|
||||
int RADEON_READ_PLL(drm_device_t * dev, int addr)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -587,7 +585,6 @@ static void radeon_status( drm_radeon_private_t *dev_priv )
|
|||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Engine, FIFO control
|
||||
*/
|
||||
|
@ -618,8 +615,7 @@ static int radeon_do_pixcache_flush( drm_radeon_private_t *dev_priv )
return DRM_ERR(EBUSY);
}

static int radeon_do_wait_for_fifo( drm_radeon_private_t *dev_priv,
int entries )
static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
{
int i;

@ -628,7 +624,8 @@ static int radeon_do_wait_for_fifo( drm_radeon_private_t *dev_priv,
for (i = 0; i < dev_priv->usec_timeout; i++) {
int slots = (RADEON_READ(RADEON_RBBM_STATUS)
& RADEON_RBBM_FIFOCNT_MASK);
if ( slots >= entries ) return 0;
if (slots >= entries)
return 0;
DRM_UDELAY(1);
}
|
||||
|
||||
|
@ -646,7 +643,8 @@ static int radeon_do_wait_for_idle( drm_radeon_private_t *dev_priv )
|
|||
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
|
||||
|
||||
ret = radeon_do_wait_for_fifo(dev_priv, 64);
|
||||
if ( ret ) return ret;
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < dev_priv->usec_timeout; i++) {
|
||||
if (!(RADEON_READ(RADEON_RBBM_STATUS)
|
||||
|
@ -664,7 +662,6 @@ static int radeon_do_wait_for_idle( drm_radeon_private_t *dev_priv )
|
|||
return DRM_ERR(EBUSY);
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* CP control, initialization
|
||||
*/
|
||||
|
@ -679,19 +676,15 @@ static void radeon_cp_load_microcode( drm_radeon_private_t *dev_priv )

RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);

if (dev_priv->is_r200)
{
if (dev_priv->is_r200) {
DRM_INFO("Loading R200 Microcode\n");
for ( i = 0 ; i < 256 ; i++ )
{
for (i = 0; i < 256; i++) {
RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
R200_cp_microcode[i][1]);
RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
R200_cp_microcode[i][0]);
}
}
else
{
} else {
for (i = 0; i < 256; i++) {
RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
radeon_cp_microcode[i][1]);
|
||||
|
@ -828,7 +821,6 @@ static int radeon_do_engine_reset( drm_device_t *dev )
|
|||
RADEON_SOFT_RESET_RB)));
|
||||
RADEON_READ(RADEON_RBBM_SOFT_RESET);
|
||||
|
||||
|
||||
RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
|
||||
RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
|
||||
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
|
||||
|
@ -864,13 +856,11 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
|
|||
(dev_priv->gart_vm_start >> 16)));
|
||||
|
||||
ring_start = (dev_priv->cp_ring->offset
|
||||
- dev->agp->base
|
||||
+ dev_priv->gart_vm_start);
|
||||
- dev->agp->base + dev_priv->gart_vm_start);
|
||||
} else
|
||||
#endif
|
||||
ring_start = (dev_priv->cp_ring->offset
|
||||
- dev->sg->handle
|
||||
+ dev_priv->gart_vm_start);
|
||||
- dev->sg->handle + dev_priv->gart_vm_start);
|
||||
|
||||
RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
|
||||
|
||||
|
@ -889,8 +879,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
|
|||
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
|
||||
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
|
||||
dev_priv->ring_rptr->offset
|
||||
- dev->agp->base
|
||||
+ dev_priv->gart_vm_start);
|
||||
- dev->agp->base + dev_priv->gart_vm_start);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
|
@ -900,8 +889,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
|
|||
tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle;
|
||||
page_ofs = tmp_ofs >> PAGE_SHIFT;
|
||||
|
||||
RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
|
||||
entry->busaddr[page_ofs]);
|
||||
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
|
||||
DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
|
||||
(unsigned long)entry->busaddr[page_ofs],
|
||||
entry->handle + tmp_ofs);
|
||||
|
@ -928,7 +916,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
|
|||
RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
|
||||
|
||||
for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
|
||||
if ( DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(1) ) == 0xdeadbeef )
|
||||
if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
|
||||
0xdeadbeef)
|
||||
break;
|
||||
DRM_UDELAY(1);
|
||||
}
|
||||
|
@ -942,20 +931,19 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
|
|||
}
|
||||
|
||||
dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
|
||||
RADEON_WRITE( RADEON_LAST_FRAME_REG,
|
||||
dev_priv->sarea_priv->last_frame );
|
||||
RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
|
||||
|
||||
dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
|
||||
RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
|
||||
dev_priv->sarea_priv->last_dispatch);
|
||||
|
||||
dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
|
||||
RADEON_WRITE( RADEON_LAST_CLEAR_REG,
|
||||
dev_priv->sarea_priv->last_clear );
|
||||
RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
|
||||
|
||||
/* Set ring buffer size */
|
||||
#ifdef __BIG_ENDIAN
|
||||
RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT );
|
||||
RADEON_WRITE(RADEON_CP_RB_CNTL,
|
||||
dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
|
||||
#else
|
||||
RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
|
||||
#endif
|
||||
|
@ -980,7 +968,8 @@ static void radeon_set_pcigart( drm_radeon_private_t *dev_priv, int on )
|
|||
u32 tmp = RADEON_READ(RADEON_AIC_CNTL);
|
||||
|
||||
if (on) {
|
||||
RADEON_WRITE( RADEON_AIC_CNTL, tmp | RADEON_PCIGART_TRANSLATE_EN );
|
||||
RADEON_WRITE(RADEON_AIC_CNTL,
|
||||
tmp | RADEON_PCIGART_TRANSLATE_EN);
|
||||
|
||||
/* set PCI GART page-table base address
|
||||
*/
|
||||
|
@ -997,7 +986,8 @@ static void radeon_set_pcigart( drm_radeon_private_t *dev_priv, int on )
|
|||
RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
|
||||
RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
|
||||
} else {
|
||||
RADEON_WRITE( RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN );
|
||||
RADEON_WRITE(RADEON_AIC_CNTL,
|
||||
tmp & ~RADEON_PCIGART_TRANSLATE_EN);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1076,8 +1066,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
RADEON_STENCIL_TEST_ALWAYS |
|
||||
RADEON_STENCIL_S_FAIL_REPLACE |
|
||||
RADEON_STENCIL_ZPASS_REPLACE |
|
||||
RADEON_STENCIL_ZFAIL_REPLACE |
|
||||
RADEON_Z_WRITE_ENABLE);
|
||||
RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
|
||||
|
||||
dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
|
||||
RADEON_BFACE_SOLID |
|
||||
|
@ -1132,7 +1121,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
}
|
||||
|
||||
if (init->gart_textures_offset) {
|
||||
dev_priv->gart_textures = drm_core_findmap(dev, init->gart_textures_offset);
|
||||
dev_priv->gart_textures =
|
||||
drm_core_findmap(dev, init->gart_textures_offset);
|
||||
if (!dev_priv->gart_textures) {
|
||||
DRM_ERROR("could not find GART texture region!\n");
|
||||
radeon_do_cleanup_cp(dev);
|
||||
|
@ -1159,11 +1149,11 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
} else
|
||||
#endif
|
||||
{
|
||||
dev_priv->cp_ring->handle =
|
||||
(void *)dev_priv->cp_ring->offset;
|
||||
dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
|
||||
dev_priv->ring_rptr->handle =
|
||||
(void *)dev_priv->ring_rptr->offset;
|
||||
dev->agp_buffer_map->handle = (void *)dev->agp_buffer_map->offset;
|
||||
dev->agp_buffer_map->handle =
|
||||
(void *)dev->agp_buffer_map->offset;
|
||||
|
||||
DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
|
||||
dev_priv->cp_ring->handle);
|
||||
|
@ -1188,7 +1178,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
((dev_priv->depth_offset
|
||||
+ dev_priv->fb_location) >> 10));
|
||||
|
||||
|
||||
dev_priv->gart_size = init->gart_size;
|
||||
dev_priv->gart_vm_start = dev_priv->fb_location
|
||||
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);
|
||||
|
@ -1204,10 +1193,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
- dev->sg->handle
|
||||
+ dev_priv->gart_vm_start);
|
||||
|
||||
DRM_DEBUG( "dev_priv->gart_size %d\n",
|
||||
dev_priv->gart_size );
|
||||
DRM_DEBUG( "dev_priv->gart_vm_start 0x%x\n",
|
||||
dev_priv->gart_vm_start );
|
||||
DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
|
||||
DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
|
||||
DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
|
||||
dev_priv->gart_buffers_offset);
|
||||
|
||||
|
@ -1217,8 +1204,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
|
|||
dev_priv->ring.size = init->ring_size;
|
||||
dev_priv->ring.size_l2qw = get_order(init->ring_size / 8);
|
||||
|
||||
dev_priv->ring.tail_mask =
|
||||
(dev_priv->ring.size / sizeof(u32)) - 1;
|
||||
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
|
||||
|
||||
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
|
||||
|
||||
|
@ -1259,7 +1245,8 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
|
|||
* may not have been called from userspace and after dev_private
|
||||
* is freed, it's too late.
|
||||
*/
|
||||
if ( dev->irq_enabled ) drm_irq_uninstall(dev);
|
||||
if (dev->irq_enabled)
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (dev_priv->flags & CHIP_IS_AGP) {
|
||||
|
@ -1328,7 +1315,6 @@ static int radeon_do_resume_cp( drm_device_t *dev )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int radeon_cp_init(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
|
@ -1336,7 +1322,8 @@ int radeon_cp_init( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
|
||||
DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data,
|
||||
sizeof(init));
|
||||
|
||||
switch (init.func) {
|
||||
case RADEON_INIT_CP:
|
||||
|
@ -1385,7 +1372,8 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t __user *)data, sizeof(stop) );
|
||||
DRM_COPY_FROM_USER_IOCTL(stop, (drm_radeon_cp_stop_t __user *) data,
|
||||
sizeof(stop));
|
||||
|
||||
if (!dev_priv->cp_running)
|
||||
return 0;
|
||||
|
@ -1402,7 +1390,8 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
|
|||
*/
|
||||
if (stop.idle) {
|
||||
ret = radeon_do_cp_idle(dev_priv);
|
||||
if ( ret ) return ret;
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Finally, we can turn off the CP. If the engine isn't idle,
|
||||
|
@ -1417,7 +1406,6 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void radeon_do_release(drm_device_t * dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -1495,7 +1483,6 @@ int radeon_cp_resume( DRM_IOCTL_ARGS )
|
|||
return radeon_do_resume_cp(dev);
|
||||
}
|
||||
|
||||
|
||||
int radeon_engine_reset(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
|
@ -1506,7 +1493,6 @@ int radeon_engine_reset( DRM_IOCTL_ARGS )
|
|||
return radeon_do_engine_reset(dev);
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Fullscreen mode
|
||||
*/
|
||||
|
@ -1518,7 +1504,6 @@ int radeon_fullscreen( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Freelist management
|
||||
*/
|
||||
|
@ -1578,6 +1563,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
|
|||
DRM_DEBUG("returning NULL!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#if 0
|
||||
drm_buf_t *radeon_freelist_get(drm_device_t * dev)
|
||||
{
|
||||
|
@ -1627,7 +1613,6 @@ void radeon_freelist_reset( drm_device_t *dev )
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* CP command submission
|
||||
*/
|
||||
|
@ -1664,14 +1649,16 @@ int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n )
|
|||
return DRM_ERR(EBUSY);
|
||||
}
|
||||
|
||||
static int radeon_cp_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
|
||||
static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev,
|
||||
drm_dma_t * d)
|
||||
{
|
||||
int i;
|
||||
drm_buf_t *buf;
|
||||
|
||||
for (i = d->granted_count; i < d->request_count; i++) {
|
||||
buf = radeon_freelist_get(dev);
|
||||
if ( !buf ) return DRM_ERR(EBUSY); /* NOTE: broken client */
|
||||
if (!buf)
|
||||
return DRM_ERR(EBUSY); /* NOTE: broken client */
|
||||
|
||||
buf->filp = filp;
|
||||
|
||||
|
@ -1743,12 +1730,14 @@ int radeon_preinit( struct drm_device *dev, unsigned long flags )
|
|||
|
||||
/* registers */
|
||||
if ((ret = drm_initmap(dev, pci_resource_start(dev->pdev, 2),
|
||||
pci_resource_len( dev->pdev, 2 ), _DRM_REGISTERS, 0 )))
|
||||
pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
|
||||
0)))
|
||||
return ret;
|
||||
|
||||
/* framebuffer */
|
||||
if ((ret = drm_initmap(dev, pci_resource_start(dev->pdev, 0),
|
||||
pci_resource_len( dev->pdev, 0 ), _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING )))
|
||||
pci_resource_len(dev->pdev, 0),
|
||||
_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING)))
|
||||
return ret;
|
||||
|
||||
/* There are signatures in BIOS and PCI-SSID for a PCI card, but they are not very reliable.

@ -1758,15 +1747,19 @@ int radeon_preinit( struct drm_device *dev, unsigned long flags )
restarts next time.
*/
pci_read_config_dword(dev->pdev, RADEON_AGP_COMMAND_PCI_CONFIG, &save);
pci_write_config_dword(dev->pdev, RADEON_AGP_COMMAND_PCI_CONFIG, save | RADEON_AGP_ENABLE);
pci_write_config_dword(dev->pdev, RADEON_AGP_COMMAND_PCI_CONFIG,
save | RADEON_AGP_ENABLE);
pci_read_config_dword(dev->pdev, RADEON_AGP_COMMAND_PCI_CONFIG, &temp);
if (temp & RADEON_AGP_ENABLE)
dev_priv->flags |= CHIP_IS_AGP;
DRM_DEBUG("%s card detected\n", ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
DRM_DEBUG("%s card detected\n",
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
pci_write_config_dword(dev->pdev, RADEON_AGP_COMMAND_PCI_CONFIG, save);

/* Check if we need a reset */
if (!(dev_priv->mmio = drm_core_findmap(dev , pci_resource_start( dev->pdev, 2 ))))
if (!
(dev_priv->mmio =
drm_core_findmap(dev, pci_resource_start(dev->pdev, 2))))
return DRM_ERR(ENOMEM);
|
||||
|
||||
#if defined(__linux__)
|
||||
|
@ -63,7 +63,6 @@
|
|||
#define RADEON_UPLOAD_ALL 0x003effff
|
||||
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
|
||||
|
||||
|
||||
/* New style per-packet identifiers for use in cmd_buffer ioctl with
|
||||
* the RADEON_EMIT_PACKET command. Comments relate new packets to old
|
||||
* state bits and the packet size:
|
||||
|
@ -147,7 +146,6 @@
|
|||
#define R200_EMIT_RB3D_BLENDCOLOR 76
|
||||
#define RADEON_MAX_STATE_PACKETS 77
|
||||
|
||||
|
||||
/* Commands understood by cmd_buffer ioctl. More can be added but
|
||||
* obviously these can't be removed or changed:
|
||||
*/
|
||||
|
@ -162,7 +160,6 @@
|
|||
* doesn't make the cpu wait, just
|
||||
* the graphics hardware */
|
||||
|
||||
|
||||
typedef union {
|
||||
int i;
|
||||
struct {
|
||||
|
@ -188,7 +185,6 @@ typedef union {
|
|||
#define RADEON_WAIT_2D 0x1
|
||||
#define RADEON_WAIT_3D 0x2
|
||||
|
||||
|
||||
#define RADEON_FRONT 0x1
|
||||
#define RADEON_BACK 0x2
|
||||
#define RADEON_DEPTH 0x4
|
||||
|
@ -302,7 +298,6 @@ typedef struct {
|
|||
unsigned int se_zbias_constant;
|
||||
} drm_radeon_context2_regs_t;
|
||||
|
||||
|
||||
/* Setup registers for each texture unit
|
||||
*/
|
||||
typedef struct {
|
||||
|
@ -324,7 +319,6 @@ typedef struct {
|
|||
unsigned int vc_format; /* vertex format */
|
||||
} drm_radeon_prim_t;
|
||||
|
||||
|
||||
typedef struct {
|
||||
drm_radeon_context_regs_t context;
|
||||
drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
|
||||
|
@ -332,7 +326,6 @@ typedef struct {
|
|||
unsigned int dirty;
|
||||
} drm_radeon_state_t;
|
||||
|
||||
|
||||
typedef struct {
|
||||
/* The channel for communication of state information to the
|
||||
* kernel on firing a vertex buffer with either of the
|
||||
|
@ -355,7 +348,8 @@ typedef struct {
|
|||
unsigned int last_dispatch;
|
||||
unsigned int last_clear;
|
||||
|
||||
drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1];
|
||||
drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
|
||||
1];
|
||||
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
|
||||
int ctx_owner;
|
||||
int pfState; /* number of 3d windows (0,1,2ormore) */
|
||||
|
@ -363,7 +357,6 @@ typedef struct {
|
|||
int crtc2_base; /* CRTC2 frame offset */
|
||||
} drm_radeon_sarea_t;
|
||||
|
||||
|
||||
/* WARNING: If you change any of these defines, make sure to change the
|
||||
* defines in the Xserver file (xf86drmRadeon.h)
|
||||
*
|
||||
|
@ -556,7 +549,6 @@ typedef struct drm_radeon_indirect {
|
|||
int discard;
|
||||
} drm_radeon_indirect_t;
|
||||
|
||||
|
||||
/* 1.3: An ioctl to get parameters that aren't available to the 3d
|
||||
* client any other way.
|
||||
*/
|
||||
|
@ -602,7 +594,6 @@ typedef struct drm_radeon_mem_init_heap {
|
|||
int start;
|
||||
} drm_radeon_mem_init_heap_t;
|
||||
|
||||
|
||||
/* 1.6: Userspace can request & wait on irq's:
|
||||
*/
|
||||
typedef struct drm_radeon_irq_emit {
|
||||
|
@ -613,7 +604,6 @@ typedef struct drm_radeon_irq_wait {
|
|||
int irq_seq;
|
||||
} drm_radeon_irq_wait_t;
|
||||
|
||||
|
||||
/* 1.10: Clients tell the DRM where they think the framebuffer is located in
|
||||
* the card's address space, via a new generic ioctl to set parameters
|
||||
*/
|
||||
|
@ -625,5 +615,4 @@ typedef struct drm_radeon_setparam {
|
|||
|
||||
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -44,7 +44,6 @@
|
|||
#define DRIVER_MINOR 11
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
|
||||
enum radeon_family {
|
||||
CHIP_R100,
|
||||
CHIP_RS100,
|
||||
|
@ -256,15 +255,18 @@ extern int radeon_wait_irq(drm_device_t *dev, int swi_nr);
|
|||
extern int radeon_emit_irq(drm_device_t * dev);
|
||||
|
||||
extern void radeon_do_release(drm_device_t * dev);
|
||||
extern int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
|
||||
extern int radeon_driver_vblank_wait(drm_device_t * dev,
|
||||
unsigned int *sequence);
|
||||
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
|
||||
extern void radeon_driver_irq_preinstall(drm_device_t * dev);
|
||||
extern void radeon_driver_irq_postinstall(drm_device_t * dev);
|
||||
extern void radeon_driver_irq_uninstall(drm_device_t * dev);
|
||||
extern void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp);
|
||||
extern void radeon_driver_pretakedown(drm_device_t * dev);
|
||||
extern int radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv);
|
||||
extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_priv);
|
||||
extern int radeon_driver_open_helper(drm_device_t * dev,
|
||||
drm_file_t * filp_priv);
|
||||
extern void radeon_driver_free_filp_priv(drm_device_t * dev,
|
||||
drm_file_t * filp_priv);
|
||||
|
||||
/* Flags for stats.boxes
|
||||
*/
|
||||
|
@ -350,7 +352,6 @@ extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_pri
|
|||
? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
|
||||
: RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
|
||||
|
||||
|
||||
#define RADEON_GEN_INT_CNTL 0x0040
|
||||
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
|
||||
# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
|
||||
|
@ -551,7 +552,6 @@ extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_pri
|
|||
# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
|
||||
# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
|
||||
|
||||
|
||||
/* CP registers */
|
||||
#define RADEON_CP_ME_RAM_ADDR 0x07d4
|
||||
#define RADEON_CP_ME_RAM_RADDR 0x07d8
|
||||
|
@ -744,7 +744,6 @@ extern void radeon_driver_free_filp_priv(drm_device_t *dev, drm_file_t *filp_pri
|
|||
#define RADEON_PP_TEX_SIZE_1 0x1d0c
|
||||
#define RADEON_PP_TEX_SIZE_2 0x1d14
|
||||
|
||||
|
||||
#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001
|
||||
#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000
|
||||
#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012
|
||||
|
@ -801,7 +800,6 @@ extern int radeon_postcleanup( struct drm_device *dev );
|
|||
#define CP_PACKET3( pkt, n ) \
|
||||
(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Engine control helper macros
|
||||
*/
|
||||
|
@ -850,7 +848,6 @@ extern int radeon_postcleanup( struct drm_device *dev );
|
|||
OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Misc helper macros
|
||||
*/
|
||||
|
@ -892,7 +889,6 @@ do { \
|
|||
OUT_RING( age ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Ring control
|
||||
*/
|
||||
|
@ -953,7 +949,6 @@ do { \
|
|||
OUT_RING( val ); \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define OUT_RING_USER_TABLE( tab, sz ) do { \
|
||||
int _size = (sz); \
|
||||
int __user *_tab = (tab); \
|
||||
|
@ -976,5 +971,4 @@ do { \
|
|||
write &= mask; \
|
||||
} while (0)
|
||||
|
||||
|
||||
#endif /* __RADEON_DRV_H__ */
|
||||
|
@ -111,7 +111,6 @@ int radeon_emit_irq(drm_device_t *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int radeon_wait_irq(drm_device_t * dev, int swi_nr)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv =
|
||||
|
@ -139,7 +138,6 @@ int radeon_emit_and_wait_irq(drm_device_t *dev)
|
|||
return radeon_wait_irq(dev, radeon_emit_irq(dev));
|
||||
}
|
||||
|
||||
|
||||
int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv =
|
||||
|
@ -169,7 +167,6 @@ int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/* Needs the lock as it touches the ring.
|
||||
*/
|
||||
int radeon_irq_emit(DRM_IOCTL_ARGS)
|
||||
|
@ -199,7 +196,6 @@ int radeon_irq_emit( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Doesn't need the hardware lock.
|
||||
*/
|
||||
int radeon_irq_wait(DRM_IOCTL_ARGS)
|
||||
|
@ -219,10 +215,10 @@ int radeon_irq_wait( DRM_IOCTL_ARGS )
|
|||
return radeon_wait_irq(dev, irqwait.irq_seq);
|
||||
}
|
||||
|
||||
|
||||
/* drm_dma.h hooks
*/
void radeon_driver_irq_preinstall( drm_device_t *dev ) {
void radeon_driver_irq_preinstall(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
|
||||
|
||||
|
@ -233,7 +229,8 @@ void radeon_driver_irq_preinstall( drm_device_t *dev ) {
|
|||
radeon_acknowledge_irqs(dev_priv);
|
||||
}
|
||||
|
||||
void radeon_driver_irq_postinstall( drm_device_t *dev ) {
|
||||
void radeon_driver_irq_postinstall(drm_device_t * dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv =
|
||||
(drm_radeon_private_t *) dev->dev_private;
|
||||
|
||||
|
@ -242,11 +239,11 @@ void radeon_driver_irq_postinstall( drm_device_t *dev ) {
|
|||
|
||||
/* Turn on SW and VBL ints */
|
||||
RADEON_WRITE(RADEON_GEN_INT_CNTL,
|
||||
RADEON_CRTC_VBLANK_MASK |
|
||||
RADEON_SW_INT_ENABLE );
|
||||
RADEON_CRTC_VBLANK_MASK | RADEON_SW_INT_ENABLE);
|
||||
}
|
||||
|
||||
void radeon_driver_irq_uninstall( drm_device_t *dev ) {
|
||||
void radeon_driver_irq_uninstall(drm_device_t * dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv =
|
||||
(drm_radeon_private_t *) dev->dev_private;
|
||||
if (!dev_priv)
|
||||
|
@ -43,7 +43,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
|
|||
{
|
||||
/* Maybe cut off the start of an existing block */
|
||||
if (start > p->start) {
|
||||
struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS );
|
||||
struct mem_block *newblock =
|
||||
drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
|
||||
if (!newblock)
|
||||
goto out;
|
||||
newblock->start = start;
|
||||
|
@ -59,7 +60,8 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
|
|||
|
||||
/* Maybe cut off the end of an existing block */
|
||||
if (size < p->size) {
|
||||
struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS );
|
||||
struct mem_block *newblock =
|
||||
drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
|
||||
if (!newblock)
|
||||
goto out;
|
||||
newblock->start = start + size;
|
||||
|
@ -104,7 +106,6 @@ static struct mem_block *find_block( struct mem_block *heap, int start )
|
|||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
static void free_block(struct mem_block *p)
|
||||
{
|
||||
p->filp = NULL;
|
||||
|
@ -155,7 +156,6 @@ static int init_heap(struct mem_block **heap, int start, int size)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Free all blocks associated with the releasing file.
|
||||
*/
|
||||
void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
|
||||
|
@ -203,12 +203,9 @@ void radeon_mem_takedown( struct mem_block **heap )
*heap = NULL;
}


/* IOCTL HANDLERS */

static struct mem_block **get_heap( drm_radeon_private_t *dev_priv,
int region )
static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
{
switch (region) {
case RADEON_MEM_REGION_GART:
|
||||
|
@ -245,14 +242,12 @@ int radeon_mem_alloc( DRM_IOCTL_ARGS )
|
|||
if (alloc.alignment < 12)
|
||||
alloc.alignment = 12;
|
||||
|
||||
block = alloc_block( *heap, alloc.size, alloc.alignment,
|
||||
filp );
|
||||
block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
|
||||
|
||||
if (!block)
|
||||
return DRM_ERR(ENOMEM);
|
||||
|
||||
if ( DRM_COPY_TO_USER( alloc.region_offset, &block->start,
|
||||
sizeof(int) ) ) {
|
||||
if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
|
@ -260,8 +255,6 @@ int radeon_mem_alloc( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int radeon_mem_free(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
|
@ -304,7 +297,8 @@ int radeon_mem_init_heap( DRM_IOCTL_ARGS )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t __user *)data,
|
||||
DRM_COPY_FROM_USER_IOCTL(initheap,
|
||||
(drm_radeon_mem_init_heap_t __user *) data,
|
||||
sizeof(initheap));
|
||||
|
||||
heap = get_heap(dev_priv, initheap.region);
|
||||
|
@ -318,5 +312,3 @@ int radeon_mem_init_heap( DRM_IOCTL_ARGS )
|
|||
|
||||
return init_heap(heap, initheap.start, initheap.size);
|
||||
}
|
||||
|
||||
|
||||
|
@ -33,14 +33,15 @@
|
|||
#include "radeon_drm.h"
|
||||
#include "radeon_drv.h"
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* Helper functions for client state checking and fixup
|
||||
*/
|
||||
|
||||
static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_priv,
|
||||
static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
|
||||
dev_priv,
|
||||
drm_file_t * filp_priv,
|
||||
u32 *offset ) {
|
||||
u32 * offset)
|
||||
{
|
||||
u32 off = *offset;
|
||||
struct drm_radeon_driver_file_fields *radeon_priv;
|
||||
|
||||
|
@ -63,9 +64,11 @@ static __inline__ int radeon_check_and_fixup_offset( drm_radeon_private_t *dev_p
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
|
||||
static __inline__ int radeon_check_and_fixup_offset_user(drm_radeon_private_t *
|
||||
dev_priv,
|
||||
drm_file_t * filp_priv,
|
||||
u32 __user *offset ) {
|
||||
u32 __user * offset)
|
||||
{
|
||||
u32 off;
|
||||
|
||||
DRM_GET_USER_UNCHECKED(off, offset);
|
||||
|
@ -78,16 +81,16 @@ static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
|
||||
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
|
||||
dev_priv,
|
||||
drm_file_t * filp_priv,
|
||||
int id,
|
||||
u32 __user *data ) {
|
||||
int id, u32 __user * data)
|
||||
{
|
||||
switch (id) {
|
||||
|
||||
case RADEON_EMIT_PP_MISC:
|
||||
if (radeon_check_and_fixup_offset_user(dev_priv, filp_priv,
|
||||
&data[( RADEON_RB3D_DEPTHOFFSET
|
||||
- RADEON_PP_MISC ) / 4] ) ) {
|
||||
&data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
|
||||
DRM_ERROR("Invalid depth buffer offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -95,8 +98,7 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
|
|||
|
||||
case RADEON_EMIT_PP_CNTL:
|
||||
if (radeon_check_and_fixup_offset_user(dev_priv, filp_priv,
|
||||
&data[( RADEON_RB3D_COLOROFFSET
|
||||
- RADEON_PP_CNTL ) / 4] ) ) {
|
||||
&data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
|
||||
DRM_ERROR("Invalid colour buffer offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -119,8 +121,7 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
|
|||
case RADEON_EMIT_PP_TXFILTER_1:
|
||||
case RADEON_EMIT_PP_TXFILTER_2:
|
||||
if (radeon_check_and_fixup_offset_user(dev_priv, filp_priv,
|
||||
&data[( RADEON_PP_TXOFFSET_0
|
||||
- RADEON_PP_TXFILTER_0 ) / 4] ) ) {
|
||||
&data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
|
||||
DRM_ERROR("Invalid R100 texture offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -136,8 +137,10 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
|
|||
for (i = 0; i < 5; i++) {
|
||||
if (radeon_check_and_fixup_offset_user(dev_priv,
|
||||
filp_priv,
|
||||
&data[i] ) ) {
|
||||
DRM_ERROR( "Invalid R200 cubic texture offset\n" );
|
||||
&data
|
||||
[i])) {
|
||||
DRM_ERROR
|
||||
("Invalid R200 cubic texture offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
|
@ -215,10 +218,13 @@ static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_priv,
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
dev_priv,
drm_file_t * filp_priv,
drm_radeon_cmd_buffer_t *cmdbuf,
unsigned int *cmdsz ) {
drm_radeon_cmd_buffer_t *
cmdbuf,
unsigned int *cmdsz)
{
u32 tmp[4];
u32 __user *cmd = (u32 __user *) cmdbuf->buf;
|
||||
|
||||
|
@ -246,7 +252,8 @@ static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_
|
|||
if (tmp[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
|
||||
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
|
||||
offset = tmp[2] << 10;
|
||||
if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
|
||||
if (radeon_check_and_fixup_offset
|
||||
(dev_priv, filp_priv, &offset)) {
|
||||
DRM_ERROR("Invalid first packet offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -256,7 +263,8 @@ static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_
|
|||
if ((tmp[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
|
||||
(tmp[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
|
||||
offset = tmp[3] << 10;
|
||||
if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &offset ) ) {
|
||||
if (radeon_check_and_fixup_offset
|
||||
(dev_priv, filp_priv, &offset)) {
|
||||
DRM_ERROR("Invalid second packet offset\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -272,7 +280,6 @@ static __inline__ int radeon_check_and_fixup_packet3( drm_radeon_private_t *dev_
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* CP hardware state programming functions
|
||||
*/
|
||||
|
@ -491,94 +498,98 @@ static struct {
|
|||
int len;
|
||||
const char *name;
|
||||
} packet[RADEON_MAX_STATE_PACKETS] = {
|
||||
{ RADEON_PP_MISC,7,"RADEON_PP_MISC" },
|
||||
{ RADEON_PP_CNTL,3,"RADEON_PP_CNTL" },
|
||||
{ RADEON_RB3D_COLORPITCH,1,"RADEON_RB3D_COLORPITCH" },
|
||||
{ RADEON_RE_LINE_PATTERN,2,"RADEON_RE_LINE_PATTERN" },
|
||||
{ RADEON_SE_LINE_WIDTH,1,"RADEON_SE_LINE_WIDTH" },
|
||||
{ RADEON_PP_LUM_MATRIX,1,"RADEON_PP_LUM_MATRIX" },
|
||||
{ RADEON_PP_ROT_MATRIX_0,2,"RADEON_PP_ROT_MATRIX_0" },
|
||||
{ RADEON_RB3D_STENCILREFMASK,3,"RADEON_RB3D_STENCILREFMASK" },
|
||||
{ RADEON_SE_VPORT_XSCALE,6,"RADEON_SE_VPORT_XSCALE" },
|
||||
{ RADEON_SE_CNTL,2,"RADEON_SE_CNTL" },
|
||||
{ RADEON_SE_CNTL_STATUS,1,"RADEON_SE_CNTL_STATUS" },
|
||||
{ RADEON_RE_MISC,1,"RADEON_RE_MISC" },
|
||||
{ RADEON_PP_TXFILTER_0,6,"RADEON_PP_TXFILTER_0" },
|
||||
{ RADEON_PP_BORDER_COLOR_0,1,"RADEON_PP_BORDER_COLOR_0" },
|
||||
{ RADEON_PP_TXFILTER_1,6,"RADEON_PP_TXFILTER_1" },
|
||||
{ RADEON_PP_BORDER_COLOR_1,1,"RADEON_PP_BORDER_COLOR_1" },
|
||||
{ RADEON_PP_TXFILTER_2,6,"RADEON_PP_TXFILTER_2" },
|
||||
{ RADEON_PP_BORDER_COLOR_2,1,"RADEON_PP_BORDER_COLOR_2" },
|
||||
{ RADEON_SE_ZBIAS_FACTOR,2,"RADEON_SE_ZBIAS_FACTOR" },
|
||||
{ RADEON_SE_TCL_OUTPUT_VTX_FMT,11,"RADEON_SE_TCL_OUTPUT_VTX_FMT" },
|
||||
{ RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED,17,"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" },
|
||||
{ R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" },
|
||||
{ R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" },
|
||||
{ R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" },
|
||||
{ R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" },
|
||||
{ R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" },
|
||||
{ R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" },
|
||||
{ R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" },
|
||||
{ R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" },
|
||||
{ R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" },
|
||||
{ R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" },
|
||||
{ R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" },
|
||||
{ R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" },
|
||||
{ R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" },
|
||||
{ R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" },
|
||||
{ R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" },
|
||||
{ R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" },
|
||||
{ R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" },
|
||||
{ R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" },
|
||||
{ R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" },
|
||||
{ R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" },
|
||||
{ R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" },
|
||||
{ R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" },
|
||||
{ R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" },
|
||||
{ R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" },
|
||||
{ R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" },
|
||||
{ R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" },
|
||||
{ R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" },
|
||||
{ R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" },
|
||||
{ R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" },
|
||||
{ R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" },
|
||||
{ R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" },
|
||||
{ R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" },
|
||||
{ R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" },
|
||||
{ R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" },
|
||||
{ R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" },
|
||||
{ R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" },
|
||||
{ R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" },
|
||||
{ R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" },
|
||||
{ R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" },
|
||||
{ R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" },
|
||||
{ R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */
|
||||
{ R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */
|
||||
{ R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" },
|
||||
{ R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" },
|
||||
{ R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" },
|
||||
{ R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" },
|
||||
{ R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" },
|
||||
{ R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" },
|
||||
{ R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" },
|
||||
{ R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" },
|
||||
{ R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" },
|
||||
{ R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" },
|
||||
{ RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0" },
|
||||
{ RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1" },
|
||||
{ RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2" },
|
||||
{ R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR" },
|
||||
};
|
||||
|
||||
|
||||
{
|
||||
RADEON_PP_MISC, 7, "RADEON_PP_MISC"}, {
|
||||
RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"}, {
|
||||
RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"}, {
|
||||
RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"}, {
|
||||
RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"}, {
|
||||
RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"}, {
|
||||
RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"}, {
|
||||
RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"}, {
|
||||
RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"}, {
|
||||
RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"}, {
|
||||
RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"}, {
|
||||
RADEON_RE_MISC, 1, "RADEON_RE_MISC"}, {
|
||||
RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"}, {
|
||||
RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"}, {
|
||||
RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"}, {
|
||||
RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"}, {
|
||||
RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"}, {
|
||||
RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"}, {
|
||||
RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"}, {
|
||||
RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"}, {
|
||||
RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
|
||||
"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"}, {
|
||||
R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"}, {
|
||||
R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"}, {
|
||||
R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"}, {
|
||||
R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"}, {
|
||||
R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"}, {
|
||||
R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"}, {
|
||||
R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"}, {
|
||||
R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"}, {
|
||||
R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
|
||||
{
|
||||
R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"}, {
|
||||
R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"}, {
|
||||
R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"}, {
|
||||
R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"}, {
|
||||
R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"}, {
|
||||
R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
|
||||
{
|
||||
R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"}, {
|
||||
R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"}, {
|
||||
R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"}, {
|
||||
R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"}, {
|
||||
R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"}, {
|
||||
R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"}, {
|
||||
R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"}, {
|
||||
R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"}, {
|
||||
R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"}, {
|
||||
R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"}, {
|
||||
R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"}, {
|
||||
R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"}, {
|
||||
R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"}, {
|
||||
R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
|
||||
{
|
||||
R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"}, {
|
||||
R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"}, {
|
||||
R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"}, {
|
||||
R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"}, {
|
||||
R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"}, {
|
||||
R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"}, {
|
||||
R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"}, {
|
||||
R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"}, {
|
||||
R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"}, {
|
||||
R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"}, {
|
||||
R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
|
||||
"R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"}, {
|
||||
R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
|
||||
{
|
||||
R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
|
||||
{
|
||||
R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"}, {
|
||||
R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"}, {
|
||||
R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"}, {
|
||||
R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"}, {
|
||||
R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"}, {
|
||||
R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"}, {
|
||||
R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"}, {
|
||||
R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"}, {
|
||||
R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"}, {
|
||||
R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"}, {
|
||||
RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"}, {
|
||||
RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"}, {
|
||||
RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"}, {
|
||||
R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},};
|
||||
|
||||
/* ================================================================
|
||||
* Performance monitoring functions
|
||||
*/
|
||||
|
||||
static void radeon_clear_box(drm_radeon_private_t * dev_priv,
|
||||
int x, int y, int w, int h,
|
||||
int r, int g, int b )
|
||||
int x, int y, int w, int h, int r, int g, int b)
|
||||
{
|
||||
u32 color;
|
||||
RING_LOCALS;
|
||||
|
@ -589,8 +600,7 @@ static void radeon_clear_box( drm_radeon_private_t *dev_priv,
|
|||
switch (dev_priv->color_fmt) {
|
||||
case RADEON_COLOR_FORMAT_RGB565:
|
||||
color = (((r & 0xf8) << 8) |
|
||||
((g & 0xfc) << 3) |
|
||||
((b & 0xf8) >> 3));
|
||||
((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
|
||||
break;
|
||||
case RADEON_COLOR_FORMAT_ARGB8888:
|
||||
default:
|
||||
|
@ -611,8 +621,7 @@ static void radeon_clear_box( drm_radeon_private_t *dev_priv,
|
|||
RADEON_GMC_BRUSH_SOLID_COLOR |
|
||||
(dev_priv->color_fmt << 8) |
|
||||
RADEON_GMC_SRC_DATATYPE_COLOR |
|
||||
RADEON_ROP3_P |
|
||||
RADEON_GMC_CLR_CMP_CNTL_DIS );
|
||||
RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
|
||||
|
||||
if (dev_priv->page_flipping && dev_priv->current_page == 1) {
|
||||
OUT_RING(dev_priv->front_pitch_offset);
|
||||
|
@ -665,7 +674,6 @@ static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
|
|||
if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
|
||||
radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
|
||||
|
||||
|
||||
/* Draw bars indicating number of buffers allocated
|
||||
* (not a great measure, easily confused)
|
||||
*/
|
||||
|
@ -681,6 +689,7 @@ static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
|
|||
memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
|
||||
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
* CP command dispatch functions
|
||||
*/
|
||||
|
@ -706,8 +715,10 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
unsigned int tmp = flags;
|
||||
|
||||
flags &= ~(RADEON_FRONT | RADEON_BACK);
|
||||
if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK;
|
||||
if ( tmp & RADEON_BACK ) flags |= RADEON_FRONT;
|
||||
if (tmp & RADEON_FRONT)
|
||||
flags |= RADEON_BACK;
|
||||
if (tmp & RADEON_BACK)
|
||||
flags |= RADEON_FRONT;
|
||||
}
|
||||
|
||||
if (flags & (RADEON_FRONT | RADEON_BACK)) {
|
||||
|
@ -740,10 +751,12 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
if (flags & RADEON_FRONT) {
BEGIN_RING(6);

OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
OUT_RING(CP_PACKET3
(RADEON_CNTL_PAINT_MULTI, 4));
OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_SOLID_COLOR |
(dev_priv->color_fmt << 8) |
(dev_priv->
color_fmt << 8) |
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_P |
RADEON_GMC_CLR_CMP_CNTL_DIS);

@ -760,10 +773,12 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
if (flags & RADEON_BACK) {
BEGIN_RING(6);

OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
OUT_RING(CP_PACKET3
(RADEON_CNTL_PAINT_MULTI, 4));
OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_SOLID_COLOR |
(dev_priv->color_fmt << 8) |
(dev_priv->
color_fmt << 8) |
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_P |
RADEON_GMC_CLR_CMP_CNTL_DIS);
|
||||
|
@ -783,8 +798,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
* rendering a quad into just those buffers. Thus, we have to
|
||||
* make sure the 3D engine is configured correctly.
|
||||
*/
|
||||
if ( dev_priv->is_r200 &&
|
||||
(flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {
|
||||
if (dev_priv->is_r200 && (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
|
||||
|
||||
int tempPP_CNTL;
|
||||
int tempRE_CNTL;
|
||||
|
@ -810,20 +824,18 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
|
||||
tempSE_CNTL = depth_clear->se_cntl;
|
||||
|
||||
|
||||
|
||||
/* Disable TCL */
|
||||
|
||||
tempSE_VAP_CNTL = ( /* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
|
||||
(0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
|
||||
(0x9 <<
|
||||
SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
|
||||
|
||||
tempRB3D_PLANEMASK = 0x0;
|
||||
|
||||
tempRE_AUX_SCISSOR_CNTL = 0x0;
|
||||
|
||||
tempSE_VTE_CNTL =
|
||||
SE_VTE_CNTL__VTX_XY_FMT_MASK |
|
||||
SE_VTE_CNTL__VTX_Z_FMT_MASK;
|
||||
SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
|
||||
|
||||
/* Vertex format (X, Y, Z, W) */
|
||||
tempSE_VTX_FMT_0 =
|
||||
|
@ -831,7 +843,6 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
|
||||
tempSE_VTX_FMT_1 = 0x0;
|
||||
|
||||
|
||||
/*
|
||||
* Depth buffer specific enables
|
||||
*/
|
||||
|
@ -860,8 +871,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
|
||||
OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
|
||||
OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
|
||||
OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
|
||||
tempRB3D_ZSTENCILCNTL );
|
||||
OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
|
||||
OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
|
||||
tempRB3D_STENCILREFMASK);
|
||||
OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
|
||||
|
@ -870,8 +880,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
|
||||
OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
|
||||
OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
|
||||
OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL,
|
||||
tempRE_AUX_SCISSOR_CNTL );
|
||||
OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
|
||||
ADVANCE_RING();
|
||||
|
||||
/* Make sure we restore the 3D state next time.
|
||||
|
@ -883,8 +892,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
/* Funny that this should be required --
|
||||
* sets top-left?
|
||||
*/
|
||||
radeon_emit_clip_rect( dev_priv,
|
||||
&sarea_priv->boxes[i] );
|
||||
radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
|
||||
|
||||
BEGIN_RING(14);
|
||||
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
|
||||
|
@ -905,8 +913,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
OUT_RING(0x3f800000);
|
||||
ADVANCE_RING();
|
||||
}
|
||||
}
|
||||
else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {
|
||||
} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
|
||||
|
||||
rb3d_cntl = depth_clear->rb3d_cntl;
|
||||
|
||||
|
@ -933,12 +940,9 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
|
||||
OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL,
|
||||
depth_clear->rb3d_zstencilcntl);
|
||||
OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
|
||||
rb3d_stencilrefmask );
|
||||
OUT_RING_REG( RADEON_RB3D_PLANEMASK,
|
||||
0x00000000 );
|
||||
OUT_RING_REG( RADEON_SE_CNTL,
|
||||
depth_clear->se_cntl );
|
||||
OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
|
||||
OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
|
||||
OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
|
||||
ADVANCE_RING();
|
||||
|
||||
/* Make sure we restore the 3D state next time.
|
||||
|
@ -950,8 +954,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
/* Funny that this should be required --
|
||||
* sets top-left?
|
||||
*/
|
||||
radeon_emit_clip_rect( dev_priv,
|
||||
&sarea_priv->boxes[i] );
|
||||
radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
|
||||
|
||||
BEGIN_RING(15);
|
||||
|
||||
|
@ -964,7 +967,6 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
|
|||
RADEON_VTX_FMT_RADEON_MODE |
|
||||
(3 << RADEON_NUM_VERTICES_SHIFT)));
|
||||
|
||||
|
||||
OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
|
||||
OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
|
||||
OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
|
||||
|
@ -1013,7 +1015,6 @@ static void radeon_cp_dispatch_swap( drm_device_t *dev )
|
|||
if (dev_priv->do_boxes)
|
||||
radeon_cp_performance_boxes(dev_priv);
|
||||
|
||||
|
||||
/* Wait for the 3D stream to idle before dispatching the bitblt.
|
||||
* This will prevent data corruption between the two streams.
|
||||
*/
|
||||
|
@ -1029,8 +1030,7 @@ static void radeon_cp_dispatch_swap( drm_device_t *dev )
|
|||
int w = pbox[i].x2 - x;
|
||||
int h = pbox[i].y2 - y;
|
||||
|
||||
DRM_DEBUG( "dispatch swap %d,%d-%d,%d\n",
|
||||
x, y, w, h );
|
||||
DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h);
|
||||
|
||||
BEGIN_RING(7);
|
||||
|
||||
|
@ -1042,16 +1042,14 @@ static void radeon_cp_dispatch_swap( drm_device_t *dev )
|
|||
RADEON_GMC_SRC_DATATYPE_COLOR |
|
||||
RADEON_ROP3_S |
|
||||
RADEON_DP_SRC_SOURCE_MEMORY |
|
||||
RADEON_GMC_CLR_CMP_CNTL_DIS |
|
||||
RADEON_GMC_WR_MSK_DIS );
|
||||
RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
|
||||
|
||||
/* Make this work even if front & back are flipped:
|
||||
*/
|
||||
if (dev_priv->current_page == 0) {
|
||||
OUT_RING(dev_priv->back_pitch_offset);
|
||||
OUT_RING(dev_priv->front_pitch_offset);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
OUT_RING(dev_priv->front_pitch_offset);
|
||||
OUT_RING(dev_priv->back_pitch_offset);
|
||||
}
|
||||
|
@ -1086,8 +1084,7 @@ static void radeon_cp_dispatch_flip( drm_device_t *dev )
|
|||
RING_LOCALS;
|
||||
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
|
||||
__FUNCTION__,
|
||||
dev_priv->current_page,
|
||||
dev_priv->sarea_priv->pfCurrentPage);
|
||||
dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
|
||||
|
||||
/* Do some trivial performance monitoring...
|
||||
*/
|
||||
|
@ -1101,9 +1098,9 @@ static void radeon_cp_dispatch_flip( drm_device_t *dev )
|
|||
BEGIN_RING(6);
|
||||
|
||||
RADEON_WAIT_UNTIL_3D_IDLE();
|
||||
OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch
|
||||
+ sarea->frame.x
|
||||
* ( dev_priv->color_fmt - 2 ) ) & ~7 )
|
||||
OUT_RING_REG(RADEON_CRTC_OFFSET,
|
||||
((sarea->frame.y * dev_priv->front_pitch +
|
||||
sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
|
||||
+ offset);
|
||||
OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
|
||||
+ offset);
|
||||
|
@ -1148,8 +1145,6 @@ static int bad_prim_vertex_nr( int primitive, int nr )
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
typedef struct {
|
||||
unsigned int start;
|
||||
unsigned int finish;
|
||||
|
@ -1162,7 +1157,6 @@ typedef struct {
|
|||
static void radeon_cp_dispatch_vertex(drm_device_t * dev,
|
||||
drm_buf_t * buf,
|
||||
drm_radeon_tcl_prim_t * prim)
|
||||
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
|
||||
|
@ -1174,10 +1168,7 @@ static void radeon_cp_dispatch_vertex( drm_device_t *dev,
|
|||
|
||||
DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
|
||||
prim->prim,
|
||||
prim->vc_format,
|
||||
prim->start,
|
||||
prim->finish,
|
||||
prim->numverts);
|
||||
prim->vc_format, prim->start, prim->finish, prim->numverts);
|
||||
|
||||
if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
|
||||
DRM_ERROR("bad prim %x numverts %d\n",
|
||||
|
@ -1188,8 +1179,7 @@ static void radeon_cp_dispatch_vertex( drm_device_t *dev,
|
|||
do {
|
||||
/* Emit the next cliprect */
|
||||
if (i < nbox) {
|
||||
radeon_emit_clip_rect( dev_priv,
|
||||
&sarea_priv->boxes[i] );
|
||||
radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
|
||||
}
|
||||
|
||||
/* Emit the vertex buffer rendering commands */
|
||||
|
@ -1210,8 +1200,6 @@ static void radeon_cp_dispatch_vertex( drm_device_t *dev,
|
|||
} while (i < nbox);
|
||||
}
|
||||
|
||||
|
||||
|
||||
static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -1230,13 +1218,11 @@ static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf )
|
|||
}
|
||||
|
||||
static void radeon_cp_dispatch_indirect(drm_device_t * dev,
|
||||
drm_buf_t *buf,
|
||||
int start, int end )
|
||||
drm_buf_t * buf, int start, int end)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
RING_LOCALS;
|
||||
DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
|
||||
buf->idx, start, end );
|
||||
DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
|
||||
|
||||
if (start != end) {
|
||||
int offset = (dev_priv->gart_buffers_offset
|
||||
|
@ -1265,7 +1251,6 @@ static void radeon_cp_dispatch_indirect( drm_device_t *dev,
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
static void radeon_cp_dispatch_indices(drm_device_t * dev,
|
||||
drm_buf_t * elt_buf,
|
||||
drm_radeon_tcl_prim_t * prim)
|
||||
|
@ -1283,20 +1268,14 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
|
|||
DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
|
||||
prim->prim,
|
||||
prim->vc_format,
|
||||
prim->start,
|
||||
prim->finish,
|
||||
prim->offset,
|
||||
prim->numverts);
|
||||
prim->start, prim->finish, prim->offset, prim->numverts);
|
||||
|
||||
if (bad_prim_vertex_nr(prim->prim, count)) {
|
||||
DRM_ERROR( "bad prim %x count %d\n",
|
||||
prim->prim, count );
|
||||
DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
if ( start >= prim->finish ||
|
||||
(prim->start & 0x7) ) {
|
||||
if (start >= prim->finish || (prim->start & 0x7)) {
|
||||
DRM_ERROR("buffer prim %d\n", prim->prim);
|
||||
return;
|
||||
}
|
||||
|
@ -1318,12 +1297,10 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
|
|||
|
||||
do {
|
||||
if (i < nbox)
|
||||
radeon_emit_clip_rect( dev_priv,
|
||||
&sarea_priv->boxes[i] );
|
||||
radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
|
||||
|
||||
radeon_cp_dispatch_indirect(dev, elt_buf,
|
||||
prim->start,
|
||||
prim->finish );
|
||||
prim->start, prim->finish);
|
||||
|
||||
i++;
|
||||
} while (i < nbox);
|
||||
|
@ -1376,7 +1353,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
|
|||
ADVANCE_RING();
|
||||
#endif
|
||||
|
||||
|
||||
/* The compiler won't optimize away a division by a variable,
|
||||
* even if the only legal values are powers of two. Thus, we'll
|
||||
* use a shift instead.
|
||||
|
@ -1445,10 +1421,10 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
|
|||
return DRM_ERR(EAGAIN);
|
||||
}
|
||||
|
||||
|
||||
/* Dispatch the indirect buffer.
|
||||
*/
|
||||
buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
|
||||
buffer =
|
||||
(u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
|
||||
dwords = size / 4;
|
||||
buffer[0] = CP_PACKET3(RADEON_CNTL_HOSTDATA_BLT, dwords + 6);
|
||||
buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
|
||||
|
@ -1484,8 +1460,7 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
|
|||
* width.
|
||||
*/
|
||||
for (i = 0; i < tex->height; i++) {
|
||||
if ( DRM_COPY_FROM_USER( buffer, data,
|
||||
tex_width ) ) {
|
||||
if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
|
||||
DRM_ERROR("EFAULT on pad, %d bytes\n",
|
||||
tex_width);
|
||||
return DRM_ERR(EFAULT);
|
||||
|
@ -1517,7 +1492,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -1538,7 +1512,6 @@ static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
|
|||
ADVANCE_RING();
|
||||
}
|
||||
|
||||
|
||||
/* ================================================================
|
||||
* IOCTL functions
|
||||
*/
|
||||
|
@ -1572,7 +1545,6 @@ int radeon_cp_clear( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Not sure why this isn't set all the time:
|
||||
*/
|
||||
static int radeon_do_init_pageflip(drm_device_t * dev)
|
||||
|
@ -1585,9 +1557,11 @@ static int radeon_do_init_pageflip( drm_device_t *dev )
|
|||
BEGIN_RING(6);
|
||||
RADEON_WAIT_UNTIL_3D_IDLE();
|
||||
OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
|
||||
OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
|
||||
OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
|
||||
RADEON_CRTC_OFFSET_FLIP_CNTL);
|
||||
OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
|
||||
OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
|
||||
OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
|
||||
RADEON_CRTC_OFFSET_FLIP_CNTL);
|
||||
ADVANCE_RING();
|
||||
|
||||
dev_priv->page_flipping = 1;
|
||||
|
@ -1679,16 +1653,14 @@ int radeon_cp_vertex( DRM_IOCTL_ARGS )
|
|||
sizeof(vertex));
|
||||
|
||||
DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
|
||||
DRM_CURRENTPID,
|
||||
vertex.idx, vertex.count, vertex.discard );
|
||||
DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);
|
||||
|
||||
if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
vertex.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
if ( vertex.prim < 0 ||
|
||||
vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
|
||||
if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
|
||||
DRM_ERROR("buffer prim %d\n", vertex.prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1770,16 +1742,14 @@ int radeon_cp_indices( DRM_IOCTL_ARGS )
|
|||
sizeof(elts));
|
||||
|
||||
DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
|
||||
DRM_CURRENTPID,
|
||||
elts.idx, elts.start, elts.end, elts.discard );
|
||||
DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard);
|
||||
|
||||
if (elts.idx < 0 || elts.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
elts.idx, dma->buf_count - 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
if ( elts.prim < 0 ||
|
||||
elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
|
||||
if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
|
||||
DRM_ERROR("buffer prim %d\n", elts.prim);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -1828,7 +1798,6 @@ int radeon_cp_indices( DRM_IOCTL_ARGS )
|
|||
RADEON_REQUIRE_QUIESCENCE);
|
||||
}
|
||||
|
||||
|
||||
/* Build up a prim_t record:
|
||||
*/
|
||||
prim.start = elts.start;
|
||||
|
@ -1857,7 +1826,8 @@ int radeon_cp_texture( DRM_IOCTL_ARGS )
|
|||
|
||||
LOCK_TEST_WITH_RETURN(dev, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t __user *)data, sizeof(tex) );
|
||||
DRM_COPY_FROM_USER_IOCTL(tex, (drm_radeon_texture_t __user *) data,
|
||||
sizeof(tex));
|
||||
|
||||
if (tex.image == NULL) {
|
||||
DRM_ERROR("null texture image!\n");
|
||||
|
@ -1917,12 +1887,12 @@ int radeon_cp_indirect( DRM_IOCTL_ARGS )
|
|||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t __user *)data,
|
||||
DRM_COPY_FROM_USER_IOCTL(indirect,
|
||||
(drm_radeon_indirect_t __user *) data,
|
||||
sizeof(indirect));
|
||||
|
||||
DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
|
||||
indirect.idx, indirect.start,
|
||||
indirect.end, indirect.discard );
|
||||
indirect.idx, indirect.start, indirect.end, indirect.discard);
|
||||
|
||||
if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
|
@ -1971,7 +1941,6 @@ int radeon_cp_indirect( DRM_IOCTL_ARGS )
|
|||
radeon_cp_discard_buffer(dev, buf);
|
||||
}
|
||||
|
||||
|
||||
COMMIT_RING();
|
||||
return 0;
|
||||
}
|
||||
|
@ -2001,8 +1970,7 @@ int radeon_cp_vertex2( DRM_IOCTL_ARGS )
|
|||
sizeof(vertex));
|
||||
|
||||
DRM_DEBUG("pid=%d index=%d discard=%d\n",
|
||||
DRM_CURRENTPID,
|
||||
vertex.idx, vertex.discard );
|
||||
DRM_CURRENTPID, vertex.idx, vertex.discard);
|
||||
|
||||
if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
|
||||
DRM_ERROR("buffer index %d (of %d max)\n",
|
||||
|
@ -2081,9 +2049,7 @@ int radeon_cp_vertex2( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int radeon_emit_packets(
|
||||
drm_radeon_private_t *dev_priv,
|
||||
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
|
||||
drm_file_t * filp_priv,
|
||||
drm_radeon_cmd_header_t header,
|
||||
drm_radeon_cmd_buffer_t * cmdbuf)
|
||||
|
@ -2119,8 +2085,7 @@ static int radeon_emit_packets(
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int radeon_emit_scalars(
|
||||
drm_radeon_private_t *dev_priv,
|
||||
static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_cmd_header_t header,
|
||||
drm_radeon_cmd_buffer_t * cmdbuf)
|
||||
{
|
||||
|
@ -2143,8 +2108,7 @@ static __inline__ int radeon_emit_scalars(
|
|||
|
||||
/* God this is ugly
|
||||
*/
|
||||
static __inline__ int radeon_emit_scalars2(
|
||||
drm_radeon_private_t *dev_priv,
|
||||
static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_cmd_header_t header,
|
||||
drm_radeon_cmd_buffer_t * cmdbuf)
|
||||
{
|
||||
|
@ -2165,8 +2129,7 @@ static __inline__ int radeon_emit_scalars2(
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int radeon_emit_vectors(
|
||||
drm_radeon_private_t *dev_priv,
|
||||
static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv,
|
||||
drm_radeon_cmd_header_t header,
|
||||
drm_radeon_cmd_buffer_t * cmdbuf)
|
||||
{
|
||||
|
@ -2188,7 +2151,6 @@ static __inline__ int radeon_emit_vectors(
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int radeon_emit_packet3(drm_device_t * dev,
|
||||
drm_file_t * filp_priv,
|
||||
drm_radeon_cmd_buffer_t * cmdbuf)
|
||||
|
@ -2216,7 +2178,6 @@ static int radeon_emit_packet3( drm_device_t *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int radeon_emit_packet3_cliprect(drm_device_t * dev,
|
||||
drm_file_t * filp_priv,
|
||||
drm_radeon_cmd_buffer_t * cmdbuf,
|
||||
|
@ -2244,7 +2205,8 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
|
|||
|
||||
do {
|
||||
if (i < cmdbuf->nbox) {
|
||||
if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
|
||||
if (DRM_COPY_FROM_USER_UNCHECKED
|
||||
(&box, &boxes[i], sizeof(box)))
|
||||
return DRM_ERR(EFAULT);
|
||||
/* FIXME The second and subsequent times round
|
||||
* this loop, send a WAIT_UNTIL_3D_IDLE before
|
||||
|
@ -2280,7 +2242,6 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int radeon_emit_wait(drm_device_t * dev, int flags)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -2331,13 +2292,13 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
|
|||
|
||||
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t __user *)data,
|
||||
DRM_COPY_FROM_USER_IOCTL(cmdbuf,
|
||||
(drm_radeon_cmd_buffer_t __user *) data,
|
||||
sizeof(cmdbuf));
|
||||
|
||||
RING_SPACE_TEST_WITH_RETURN(dev_priv);
|
||||
VB_AGE_TEST_WITH_RETURN(dev_priv);
|
||||
|
||||
|
||||
if (DRM_VERIFYAREA_READ(cmdbuf.buf, cmdbuf.bufsz))
|
||||
return DRM_ERR(EFAULT);
|
||||
|
||||
|
@ -2361,7 +2322,8 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
|
|||
switch (header.header.cmd_type) {
|
||||
case RADEON_CMD_PACKET:
|
||||
DRM_DEBUG("RADEON_CMD_PACKET\n");
|
||||
if (radeon_emit_packets( dev_priv, filp_priv, header, &cmdbuf )) {
|
||||
if (radeon_emit_packets
|
||||
(dev_priv, filp_priv, header, &cmdbuf)) {
|
||||
DRM_ERROR("radeon_emit_packets failed\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -2412,7 +2374,8 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
|
|||
|
||||
case RADEON_CMD_PACKET3_CLIP:
|
||||
DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
|
||||
if (radeon_emit_packet3_cliprect( dev, filp_priv, &cmdbuf, orig_nbox )) {
|
||||
if (radeon_emit_packet3_cliprect
|
||||
(dev, filp_priv, &cmdbuf, orig_nbox)) {
|
||||
DRM_ERROR("radeon_emit_packet3_clip failed\n");
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
@ -2441,14 +2404,11 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
DRM_DEBUG("DONE\n");
|
||||
COMMIT_RING();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int radeon_cp_getparam(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
|
@ -2523,7 +2483,8 @@ int radeon_cp_getparam( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
int radeon_cp_setparam( DRM_IOCTL_ARGS ) {
|
||||
int radeon_cp_setparam(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
drm_file_t *filp_priv;
|
||||
|
@ -2581,7 +2542,9 @@ int radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv)
|
|||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
struct drm_radeon_driver_file_fields *radeon_priv;
|
||||
|
||||
radeon_priv = (struct drm_radeon_driver_file_fields *)drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
|
||||
radeon_priv =
|
||||
(struct drm_radeon_driver_file_fields *)
|
||||
drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
|
||||
|
||||
if (!radeon_priv)
|
||||
return -ENOMEM;
|
||||
|
@ -2597,8 +2560,8 @@ int radeon_driver_open_helper(drm_device_t *dev, drm_file_t *filp_priv)
|
|||
|
||||
void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv)
|
||||
{
|
||||
struct drm_radeon_driver_file_fields *radeon_priv = filp_priv->driver_priv;
|
||||
struct drm_radeon_driver_file_fields *radeon_priv =
|
||||
filp_priv->driver_priv;
|
||||
|
||||
drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
|
||||
}
|
||||
|
||||
|
|
|
@ -158,8 +158,7 @@ int setDestroy(set_t *set)
|
|||
|
||||
#define ISFREE(bptr) ((bptr)->free)
|
||||
|
||||
memHeap_t *mmInit(int ofs,
|
||||
int size)
|
||||
memHeap_t *mmInit(int ofs, int size)
|
||||
{
|
||||
PMemBlock blocks;
|
||||
|
||||
|
@ -196,9 +195,7 @@ int mmBlockInHeap(memHeap_t *heap, PMemBlock b)
|
|||
|
||||
/* Kludgey workaround for existing i810 server. Remove soon.
|
||||
*/
|
||||
memHeap_t *mmAddRange( memHeap_t *heap,
|
||||
int ofs,
|
||||
int size )
|
||||
memHeap_t *mmAddRange(memHeap_t * heap, int ofs, int size)
|
||||
{
|
||||
PMemBlock blocks;
|
||||
blocks = (TMemBlock *) drm_calloc(2, sizeof(TMemBlock), DRM_MEM_DRIVER);
|
||||
|
|
|
@ -115,9 +115,7 @@ static __inline__ void mmMarkReserved(PMemBlock b)
|
|||
*/
|
||||
memHeap_t *mmInit(int ofs, int size);
|
||||
|
||||
memHeap_t *mmAddRange( memHeap_t *heap,
|
||||
int ofs,
|
||||
int size );
|
||||
memHeap_t *mmAddRange(memHeap_t * heap, int ofs, int size);
|
||||
|
||||
/*
|
||||
* Allocate 'size' bytes with 2^align2 bytes alignment,
|
||||
|
|
|
@ -28,10 +28,6 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "sis_drm.h"
|
||||
#include "sis_drv.h"
|
||||
#include "sis_ds.h"
|
||||
#if defined(__linux__) && defined(CONFIG_FB_SIS)
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
|
||||
#include <video/sisfb.h>
|
||||
|
@ -39,6 +35,10 @@
|
|||
#include <linux/sisfb.h>
|
||||
#endif
|
||||
#endif
|
||||
#include "drmP.h"
|
||||
#include "sis_drm.h"
|
||||
#include "sis_drv.h"
|
||||
#include "sis_ds.h"
|
||||
|
||||
#define MAX_CONTEXT 100
|
||||
#define VIDEO_TYPE 0
|
||||
|
@ -52,14 +52,12 @@ typedef struct {
|
|||
|
||||
static sis_context_t global_ppriv[MAX_CONTEXT];
|
||||
|
||||
|
||||
static int add_alloc_set(int context, int type, unsigned int val)
|
||||
{
|
||||
int i, retval = 0;
|
||||
|
||||
for (i = 0; i < MAX_CONTEXT; i++) {
|
||||
if (global_ppriv[i].used && global_ppriv[i].context == context)
|
||||
{
|
||||
if (global_ppriv[i].used && global_ppriv[i].context == context) {
|
||||
retval = setAdd(global_ppriv[i].sets[type], val);
|
||||
break;
|
||||
}
|
||||
|
@ -72,8 +70,7 @@ static int del_alloc_set(int context, int type, unsigned int val)
|
|||
int i, retval = 0;
|
||||
|
||||
for (i = 0; i < MAX_CONTEXT; i++) {
|
||||
if (global_ppriv[i].used && global_ppriv[i].context == context)
|
||||
{
|
||||
if (global_ppriv[i].used && global_ppriv[i].context == context) {
|
||||
retval = setDel(global_ppriv[i].sets[type], val);
|
||||
break;
|
||||
}
|
||||
|
@ -260,7 +257,8 @@ int sis_ioctl_agp_init( DRM_IOCTL_ARGS )
|
|||
if (dev_priv->AGPHeap != NULL)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *)data, sizeof(agp));
|
||||
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
|
||||
sizeof(agp));
|
||||
|
||||
dev_priv->AGPHeap = mmInit(agp.offset, agp.size);
|
||||
|
||||
|
@ -315,7 +313,8 @@ int sis_ioctl_agp_free( DRM_IOCTL_ARGS )
|
|||
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *)data, sizeof(agp));
|
||||
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *) data,
|
||||
sizeof(agp));
|
||||
|
||||
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp.free))
|
||||
return DRM_ERR(EINVAL);
|
||||
|
@ -352,8 +351,7 @@ int sis_init_context(struct drm_device *dev, int context)
|
|||
}
|
||||
}
|
||||
if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
|
||||
(global_ppriv[i].sets[1] == NULL))
|
||||
{
|
||||
(global_ppriv[i].sets[1] == NULL)) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -30,10 +30,6 @@
|
|||
#ifndef __TDFX_H__
|
||||
#define __TDFX_H__
|
||||
|
||||
/* This remains constant for all DRM template files.
|
||||
*/
|
||||
#define DRM(x) tdfx_##x
|
||||
|
||||
/* General customization:
|
||||
*/
|
||||
|
||||
|
|
|
@ -42,7 +42,8 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
|
|||
do {
|
||||
hw_addr = *hw_addr_ptr;
|
||||
if (count-- == 0) {
|
||||
DRM_ERROR("via_cmdbuf_wait timed out hw %x dma_low %x\n",
|
||||
DRM_ERROR
|
||||
("via_cmdbuf_wait timed out hw %x dma_low %x\n",
|
||||
hw_addr, dev_priv->dma_low);
|
||||
return -1;
|
||||
}
|
||||
|
@ -56,8 +57,8 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
|
|||
*
|
||||
* Returns virtual pointer to ring buffer.
|
||||
*/
|
||||
static inline uint32_t *
|
||||
via_check_dma(drm_via_private_t * dev_priv, unsigned int size)
|
||||
static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
|
||||
unsigned int size)
|
||||
{
|
||||
if ((dev_priv->dma_low + size + 0x400) > dev_priv->dma_high) {
|
||||
via_cmdbuf_rewind(dev_priv);
|
||||
|
@ -86,7 +87,6 @@ int via_dma_cleanup(drm_device_t *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int via_initialize(drm_device_t * dev,
|
||||
drm_via_private_t * dev_priv,
|
||||
drm_via_dma_init_t * init)
|
||||
|
@ -131,7 +131,6 @@ static int via_initialize(drm_device_t *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int via_dma_init(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
|
@ -139,7 +138,8 @@ int via_dma_init( DRM_IOCTL_ARGS )
|
|||
drm_via_dma_init_t init;
|
||||
int retcode = 0;
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *)data, sizeof(init));
|
||||
DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *) data,
|
||||
sizeof(init));
|
||||
|
||||
switch (init.func) {
|
||||
case VIA_INIT_DMA:
|
||||
|
@ -156,9 +156,7 @@ int via_dma_init( DRM_IOCTL_ARGS )
|
|||
return retcode;
|
||||
}
|
||||
|
||||
|
||||
static int via_dispatch_cmdbuffer(drm_device_t *dev,
|
||||
drm_via_cmdbuffer_t *cmd )
|
||||
static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
|
||||
{
|
||||
drm_via_private_t *dev_priv = dev->dev_private;
|
||||
uint32_t *vb;
|
||||
|
@ -175,7 +173,6 @@ static int via_dispatch_cmdbuffer(drm_device_t *dev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int via_quiescent(drm_device_t * dev)
|
||||
{
|
||||
drm_via_private_t *dev_priv = dev->dev_private;
|
||||
|
@ -186,7 +183,6 @@ static int via_quiescent(drm_device_t *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int via_flush_ioctl(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
|
@ -199,7 +195,6 @@ int via_flush_ioctl( DRM_IOCTL_ARGS )
|
|||
return via_quiescent(dev);
|
||||
}
|
||||
|
||||
|
||||
int via_cmdbuffer(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
|
@ -281,13 +276,12 @@ static int via_dispatch_pci_cmdbuffer(drm_device_t *dev,
|
|||
} else {
|
||||
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
|
||||
return DRM_ERR(EFAULT);
|
||||
ret = via_parse_pci_cmdbuffer( dev, dev_priv->pci_buf, cmd->size );
|
||||
ret =
|
||||
via_parse_pci_cmdbuffer(dev, dev_priv->pci_buf, cmd->size);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
|
||||
{
|
||||
DRM_DEVICE;
|
||||
|
@ -297,7 +291,8 @@ int via_pci_cmdbuffer( DRM_IOCTL_ARGS )
|
|||
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
|
||||
sizeof(cmdbuf));
|
||||
|
||||
DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);
|
||||
DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
|
||||
cmdbuf.size);
|
||||
|
||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
||||
DRM_ERROR("via_pci_cmdbuffer called without lock held\n");
|
||||
|
@ -312,9 +307,6 @@ int via_pci_cmdbuffer( DRM_IOCTL_ARGS )
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/************************************************************************/
|
||||
#include "via_3d_reg.h"
|
||||
|
||||
|
@ -332,7 +324,6 @@ int via_pci_cmdbuffer( DRM_IOCTL_ARGS )
|
|||
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
|
||||
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
|
||||
|
||||
|
||||
#define SetReg2DAGP(nReg, nData) { \
|
||||
*((uint32_t *)(vb)) = ((nReg) >> 2) | 0xF0000000; \
|
||||
*((uint32_t *)(vb) + 1) = (nData); \
|
||||
|
@ -342,8 +333,8 @@ int via_pci_cmdbuffer( DRM_IOCTL_ARGS )
|
|||
|
||||
static uint32_t via_swap_count = 0;
|
||||
|
||||
static inline uint32_t *
|
||||
via_align_buffer(drm_via_private_t * dev_priv, uint32_t * vb, int qw_count)
|
||||
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
|
||||
uint32_t * vb, int qw_count)
|
||||
{
|
||||
for (; qw_count > 0; --qw_count) {
|
||||
*vb++ = (0xcc000000 | (dev_priv->dma_low & 0xffffff));
|
||||
|
@ -364,17 +355,16 @@ static inline uint32_t * via_get_dma(drm_via_private_t * dev_priv)
|
|||
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
|
||||
}
|
||||
|
||||
|
||||
static int via_wait_idle(drm_via_private_t * dev_priv)
|
||||
{
|
||||
int count = 10000000;
|
||||
while (count-- && (VIA_READ(VIA_REG_STATUS) &
|
||||
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)));
|
||||
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
|
||||
VIA_3D_ENG_BUSY))) ;
|
||||
return count;
|
||||
}
|
||||
|
||||
static inline void
|
||||
via_dummy_bitblt(drm_via_private_t * dev_priv)
|
||||
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
|
||||
{
|
||||
uint32_t *vb = via_get_dma(dev_priv);
|
||||
/* GEDST */
|
||||
|
@ -417,8 +407,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
|
|||
|
||||
pause_addr = agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
|
||||
pause_addr_lo = ((HC_SubA_HAGPBpL << 24) |
|
||||
HC_HAGPBpID_PAUSE |
|
||||
(pause_addr & 0xffffff));
|
||||
HC_HAGPBpID_PAUSE | (pause_addr & 0xffffff));
|
||||
pause_addr_hi = ((HC_SubA_HAGPBpH << 24) | (pause_addr >> 24));
|
||||
|
||||
vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
|
||||
|
@ -491,7 +480,6 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
|
|||
|
||||
vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
|
||||
|
||||
|
||||
/* Now at beginning of buffer, make sure engine will pause here. */
|
||||
dev_priv->dma_low = 0;
|
||||
if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
|
||||
|
@ -597,5 +585,3 @@ static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
|
|||
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
|
||||
via_wait_idle(dev_priv);
|
||||
}
|
||||
|
||||
/************************************************************************/
|
||||
|
|
|
@ -85,7 +85,6 @@
|
|||
#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
|
||||
#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
|
||||
|
||||
|
||||
/* Indices into buf.Setup where various bits of state are mirrored per
|
||||
* context and per buffer. These can be fired at the card as a unit,
|
||||
* or in a piecewise fashion as required.
|
||||
|
@ -190,7 +189,6 @@ typedef struct _drm_via_sarea {
|
|||
|
||||
} drm_via_sarea_t;
|
||||
|
||||
|
||||
typedef struct _drm_via_flush_agp {
|
||||
unsigned int offset;
|
||||
unsigned int size;
|
||||
|
|
|
@ -46,9 +46,7 @@ static int postinit( struct drm_device *dev, unsigned long flags )
|
|||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
dev->minor,
|
||||
pci_pretty_name(dev->pdev)
|
||||
DRIVER_DATE, dev->minor, pci_pretty_name(dev->pdev)
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
@ -84,7 +82,9 @@ static drm_ioctl_desc_t ioctls[] = {
|
|||
};
|
||||
|
||||
static struct drm_driver_fn driver_fn = {
|
||||
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
|
||||
DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
|
||||
.context_ctor = via_init_context,
|
||||
.context_dtor = via_final_context,
|
||||
.vblank_wait = via_driver_vblank_wait,
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
|
||||
#include "via_drm.h"
|
||||
|
||||
|
||||
typedef struct drm_via_ring_buffer {
|
||||
drm_map_t map;
|
||||
char *virtual_start;
|
||||
|
@ -49,7 +48,6 @@ typedef struct drm_via_private {
|
|||
char pci_buf[VIA_PREALLOCATED_PCI_SIZE];
|
||||
} drm_via_private_t;
|
||||
|
||||
|
||||
/* VIA MMIO register access */
|
||||
#define VIA_BASE ((dev_priv->mmio))
|
||||
|
||||
|
|
|
@ -28,8 +28,8 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/poll.h>
|
||||
#include <asm/io.h>
|
||||
#include <linux/pci.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#include "via_ds.h"
|
||||
extern unsigned int VIA_DEBUG;
|
||||
|
@ -56,8 +56,7 @@ int via_setAdd(set_t *set, ITEM_TYPE item)
|
|||
if (free != -1) {
|
||||
set->list[free].val = item;
|
||||
set->free = set->list[free].free_next;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
set->list[free].alloc_next = set->alloc;
|
||||
|
@ -74,7 +73,8 @@ int via_setDel(set_t *set, ITEM_TYPE item)
|
|||
while (alloc != -1) {
|
||||
if (set->list[alloc].val == item) {
|
||||
if (prev != -1)
|
||||
set->list[prev].alloc_next = set->list[alloc].alloc_next;
|
||||
set->list[prev].alloc_next =
|
||||
set->list[alloc].alloc_next;
|
||||
else
|
||||
set->alloc = set->list[alloc].alloc_next;
|
||||
break;
|
||||
|
@ -103,7 +103,6 @@ int via_setFirst(set_t *set, ITEM_TYPE *item)
|
|||
*item = set->list[set->alloc].val;
|
||||
set->trace = set->list[set->alloc].alloc_next;
|
||||
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -142,8 +141,8 @@ void via_mmDumpMemInfo( memHeap_t *heap )
|
|||
p = (TMemBlock *) heap;
|
||||
|
||||
while (p) {
|
||||
PRINTF (" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
|
||||
p->free ? '.':'U',
|
||||
PRINTF(" Offset:%08x, Size:%08x, %c%c\n", p->ofs,
|
||||
p->size, p->free ? '.' : 'U',
|
||||
p->reserved ? 'R' : '.');
|
||||
p = p->next;
|
||||
}
|
||||
|
@ -152,15 +151,13 @@ void via_mmDumpMemInfo( memHeap_t *heap )
|
|||
PRINTF("End of memory blocks\n");
|
||||
}
|
||||
|
||||
memHeap_t *via_mmInit(int ofs,
|
||||
int size)
|
||||
memHeap_t *via_mmInit(int ofs, int size)
|
||||
{
|
||||
PMemBlock blocks;
|
||||
|
||||
if (size <= 0)
|
||||
return 0;
|
||||
|
||||
|
||||
blocks = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
|
||||
|
||||
if (blocks) {
|
||||
|
@ -172,9 +169,7 @@ memHeap_t *via_mmInit(int ofs,
|
|||
return 0;
|
||||
}
|
||||
|
||||
memHeap_t *via_mmAddRange(memHeap_t *heap,
|
||||
int ofs,
|
||||
int size)
|
||||
memHeap_t *via_mmAddRange(memHeap_t * heap, int ofs, int size)
|
||||
{
|
||||
PMemBlock blocks;
|
||||
blocks = (TMemBlock *) drm_calloc(2, sizeof(TMemBlock), DRM_MEM_DRIVER);
|
||||
|
@ -193,8 +188,7 @@ memHeap_t *via_mmAddRange(memHeap_t *heap,
|
|||
blocks[1].ofs = ofs + size;
|
||||
blocks[1].next = (PMemBlock) heap;
|
||||
return (memHeap_t *) blocks;
|
||||
}
|
||||
else
|
||||
} else
|
||||
return heap;
|
||||
}
|
||||
|
||||
|
@ -206,7 +200,9 @@ static TMemBlock* SliceBlock(TMemBlock *p,
|
|||
|
||||
/* break left */
|
||||
if (startofs > p->ofs) {
|
||||
newblock = (TMemBlock*)drm_calloc(1,sizeof(TMemBlock),DRM_MEM_DRIVER);
|
||||
newblock =
|
||||
(TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
|
||||
DRM_MEM_DRIVER);
|
||||
newblock->ofs = startofs;
|
||||
newblock->size = p->size - (startofs - p->ofs);
|
||||
newblock->free = 1;
|
||||
|
@ -218,7 +214,9 @@ static TMemBlock* SliceBlock(TMemBlock *p,
|
|||
|
||||
/* break right */
|
||||
if (size < p->size) {
|
||||
newblock = (TMemBlock*)drm_calloc(1,sizeof(TMemBlock),DRM_MEM_DRIVER);
|
||||
newblock =
|
||||
(TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
|
||||
DRM_MEM_DRIVER);
|
||||
newblock->ofs = startofs + size;
|
||||
newblock->size = p->size - size;
|
||||
newblock->free = 1;
|
||||
|
@ -234,7 +232,8 @@ static TMemBlock* SliceBlock(TMemBlock *p,
|
|||
return p;
|
||||
}
|
||||
|
||||
PMemBlock via_mmAllocMem(memHeap_t *heap, int size, int align2, int startSearch)
|
||||
PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2,
|
||||
int startSearch)
|
||||
{
|
||||
int mask, startofs, endofs;
|
||||
TMemBlock *p;
|
||||
|
@ -314,7 +313,6 @@ int via_mmFreeMem(PMemBlock b)
|
|||
else
|
||||
fprintf(stderr, "block is reserved\n");
|
||||
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -53,7 +53,6 @@ int via_setDestroy(set_t *set);
|
|||
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef MM_INC
|
||||
#define MM_INC
|
||||
|
||||
|
@ -72,13 +71,19 @@ typedef struct mem_block_t *PMemBlock;
|
|||
typedef struct mem_block_t memHeap_t;
|
||||
|
||||
static __inline__ int mmBlockSize(PMemBlock b)
|
||||
{ return b->size; }
|
||||
{
|
||||
return b->size;
|
||||
}
|
||||
|
||||
static __inline__ int mmOffset(PMemBlock b)
|
||||
{ return b->ofs; }
|
||||
{
|
||||
return b->ofs;
|
||||
}
|
||||
|
||||
static __inline__ void mmMarkReserved(PMemBlock b)
|
||||
{ b->reserved = 1; }
|
||||
{
|
||||
b->reserved = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* input: total size in bytes
|
||||
|
@ -86,8 +91,8 @@ static __inline__ void mmMarkReserved(PMemBlock b)
|
|||
*/
|
||||
memHeap_t *via_mmInit(int ofs, int size);
|
||||
|
||||
|
||||
PMemBlock via_mmAllocMem(memHeap_t *heap, int size, int align2, int startSearch);
|
||||
PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2,
|
||||
int startSearch);
|
||||
|
||||
/*
|
||||
* Free block starts at offset
|
||||
|
|
|
@ -110,7 +110,8 @@ int via_driver_vblank_wait(drm_device_t* dev, unsigned int* sequence)
|
|||
/*
|
||||
* drm_dma.h hooks
|
||||
*/
|
||||
void via_driver_irq_preinstall(drm_device_t* dev){
|
||||
void via_driver_irq_preinstall(drm_device_t * dev)
|
||||
{
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
|
||||
u32 status;
|
||||
|
||||
|
@ -128,7 +129,8 @@ void via_driver_irq_preinstall(drm_device_t* dev){
|
|||
}
|
||||
}
|
||||
|
||||
void via_driver_irq_postinstall(drm_device_t* dev){
|
||||
void via_driver_irq_postinstall(drm_device_t * dev)
|
||||
{
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
|
||||
u32 status;
|
||||
|
||||
|
@ -145,7 +147,8 @@ void via_driver_irq_postinstall(drm_device_t* dev){
|
|||
}
|
||||
}
|
||||
|
||||
void via_driver_irq_uninstall(drm_device_t* dev){
|
||||
void via_driver_irq_uninstall(drm_device_t * dev)
|
||||
{
|
||||
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
|
||||
u32 status;
|
||||
|
||||
|
@ -161,4 +164,3 @@ void via_driver_irq_uninstall(drm_device_t* dev){
|
|||
VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBI_ENABLE);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -66,7 +66,6 @@ int via_do_init_map(drm_device_t *dev, drm_via_init_t *init)
|
|||
|
||||
dev_priv->agpAddr = init->agpAddr;
|
||||
|
||||
|
||||
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i)
|
||||
DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
|
||||
|
||||
|
@ -83,8 +82,7 @@ int via_do_cleanup_map(drm_device_t *dev)
|
|||
|
||||
via_dma_cleanup(dev);
|
||||
|
||||
drm_free(dev_priv, sizeof(drm_via_private_t),
|
||||
DRM_MEM_DRIVER);
|
||||
drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
|
||||
dev->dev_private = NULL;
|
||||
}
|
||||
|
||||
|
@ -129,8 +127,7 @@ int via_decoder_futex( DRM_IOCTL_ARGS )
|
|||
switch (fx.op) {
|
||||
case VIA_FUTEX_WAIT:
|
||||
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock],
|
||||
(fx.ms / 10)*(DRM_HZ/100),
|
||||
*lock != fx.val);
|
||||
(fx.ms / 10) * (DRM_HZ / 100), *lock != fx.val);
|
||||
return ret;
|
||||
case VIA_FUTEX_WAKE:
|
||||
DRM_WAKEUP(&(dev_priv->decoder_queue[fx.lock]));
|
||||
|
@ -138,6 +135,3 @@ int via_decoder_futex( DRM_IOCTL_ARGS )
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -44,8 +44,7 @@ static int add_alloc_set(int context, int type, unsigned int val)
|
|||
int i, retval = 0;
|
||||
|
||||
for (i = 0; i < MAX_CONTEXT; i++) {
|
||||
if (global_ppriv[i].used &&
|
||||
global_ppriv[i].context == context) {
|
||||
if (global_ppriv[i].used && global_ppriv[i].context == context) {
|
||||
retval = via_setAdd(global_ppriv[i].sets[type], val);
|
||||
break;
|
||||
}
|
||||
|
@ -59,8 +58,7 @@ static int del_alloc_set(int context, int type, unsigned int val)
|
|||
int i, retval = 0;
|
||||
|
||||
for (i = 0; i < MAX_CONTEXT; i++)
|
||||
if (global_ppriv[i].used &&
|
||||
global_ppriv[i].context == context) {
|
||||
if (global_ppriv[i].used && global_ppriv[i].context == context) {
|
||||
retval = via_setDel(global_ppriv[i].sets[type], val);
|
||||
break;
|
||||
}
|
||||
|
@ -91,7 +89,6 @@ int via_fb_init( DRM_IOCTL_ARGS )
|
|||
{
|
||||
drm_via_fb_t fb;
|
||||
|
||||
|
||||
DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t *) data, sizeof(fb));
|
||||
|
||||
FBHeap = via_mmInit(fb.offset, fb.size);
|
||||
|
@ -169,12 +166,12 @@ int via_final_context(struct drm_device *dev, int context)
|
|||
|
||||
global_ppriv[i].used = 0;
|
||||
}
|
||||
|
||||
#if defined(__linux__)
|
||||
/* Linux specific until context tracking code gets ported to BSD */
|
||||
/* Last context, perform cleanup */
|
||||
if (dev->ctx_count == 1 && dev->dev_private) {
|
||||
if (dev->irq) drm_irq_uninstall(dev);
|
||||
if (dev->irq)
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
via_do_cleanup_map(dev);
|
||||
}
|
||||
|
@ -182,6 +179,7 @@ int via_final_context(struct drm_device *dev, int context)
|
|||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int via_mem_alloc(DRM_IOCTL_ARGS)
|
||||
{
|
||||
drm_via_mem_t mem;
|
||||
|
@ -226,8 +224,7 @@ int via_fb_alloc(drm_via_mem_t* mem)
|
|||
via_mmFreeMem((PMemBlock) fb.free);
|
||||
retval = -1;
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
fb.offset = 0;
|
||||
fb.size = 0;
|
||||
fb.free = 0;
|
||||
|
@ -242,6 +239,7 @@ int via_fb_alloc(drm_via_mem_t* mem)
|
|||
|
||||
return retval;
|
||||
}
|
||||
|
||||
int via_agp_alloc(drm_via_mem_t * mem)
|
||||
{
|
||||
drm_via_mm_t agp;
|
||||
|
@ -263,8 +261,7 @@ int via_agp_alloc(drm_via_mem_t* mem)
|
|||
via_mmFreeMem((PMemBlock) agp.free);
|
||||
retval = -1;
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
agp.offset = 0;
|
||||
agp.size = 0;
|
||||
agp.free = 0;
|
||||
|
@ -304,7 +301,6 @@ int via_fb_free(drm_via_mem_t* mem)
|
|||
drm_via_mm_t fb;
|
||||
int retval = 0;
|
||||
|
||||
|
||||
if (!FBHeap) {
|
||||
return -1;
|
||||
}
|
||||
|
@ -312,16 +308,14 @@ int via_fb_free(drm_via_mem_t* mem)
|
|||
fb.free = mem->index;
|
||||
fb.context = mem->context;
|
||||
|
||||
if (!fb.free)
|
||||
{
|
||||
if (!fb.free) {
|
||||
return -1;
|
||||
|
||||
}
|
||||
|
||||
via_mmFreeMem((PMemBlock) fb.free);
|
||||
|
||||
if (!del_alloc_set(fb.context, VIDEO, fb.free))
|
||||
{
|
||||
if (!del_alloc_set(fb.context, VIDEO, fb.free)) {
|
||||
retval = -1;
|
||||
}
|
||||
|
||||
|
@ -352,4 +346,3 @@ int via_agp_free(drm_via_mem_t* mem)
|
|||
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue