Merge branch 'origin' into modesetting-101

Conflicts:

	linux-core/drmP.h
	shared-core/i915_dma.c
	shared-core/i915_drm.h
	shared-core/radeon_drv.h
main
Dave Airlie 2007-11-22 17:17:06 +11:00
commit a20587e395
53 changed files with 465 additions and 400 deletions

View File

@@ -76,7 +76,11 @@ typedef struct drm_file drm_file_t;
 #if defined(__FreeBSD__)
 #include <sys/rman.h>
 #include <sys/memrange.h>
+#if __FreeBSD_version >= 800004
+#include <dev/agp/agpvar.h>
+#else /* __FreeBSD_version >= 800004 */
 #include <pci/agpvar.h>
+#endif /* __FreeBSD_version >= 800004 */
 #include <sys/agpio.h>
 #if __FreeBSD_version >= 500000
 #include <sys/mutex.h>

View File

@@ -36,7 +36,11 @@
 #include "drmP.h"
 #ifdef __FreeBSD__
+#if __FreeBSD_version >= 800004
+#include <dev/agp/agpreg.h>
+#else /* __FreeBSD_version >= 800004 */
 #include <pci/agpreg.h>
+#endif /* __FreeBSD_version >= 800004 */
 #include <dev/pci/pcireg.h>
 #endif

View File

@@ -35,7 +35,7 @@
 # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-static __inline__ void insert_page_into_table(struct ati_pcigart_info *info, u32 page_base, u32 *pci_gart)
+static __inline__ void insert_page_into_table(struct drm_ati_pcigart_info *info, u32 page_base, u32 *pci_gart)
 {
 	switch(info->gart_reg_if) {
 	case DRM_ATI_GART_IGP:
@@ -51,7 +51,7 @@ static __inline__ void insert_page_into_table(struct ati_pcigart_info *info, u32
 	}
 }
-static __inline__ u32 get_page_base_from_table(struct ati_pcigart_info *info, u32 *pci_gart)
+static __inline__ u32 get_page_base_from_table(struct drm_ati_pcigart_info *info, u32 *pci_gart)
 {
 	u32 retval;
 	switch(info->gart_reg_if) {
@@ -120,7 +120,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)
 	free_pages((unsigned long)address, order);
 }
-int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info)
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
 {
 	struct drm_sg_mem *entry = dev->sg;
 	unsigned long pages;
@@ -171,7 +171,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gar
 }
 EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info)
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
 {
 	struct drm_sg_mem *entry = dev->sg;
 	void *address = NULL;
@@ -293,7 +293,7 @@ static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
 		container_of(backend, ati_pcigart_ttm_backend_t, backend);
 	off_t j;
 	int i;
-	struct ati_pcigart_info *info = atipci_be->gart_info;
+	struct drm_ati_pcigart_info *info = atipci_be->gart_info;
 	u32 *pci_gart;
 	u32 page_base;
 	unsigned long offset = bo_mem->mm_node->start;
@@ -333,7 +333,7 @@ static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
 {
 	ati_pcigart_ttm_backend_t *atipci_be =
 		container_of(backend, ati_pcigart_ttm_backend_t, backend);
-	struct ati_pcigart_info *info = atipci_be->gart_info;
+	struct drm_ati_pcigart_info *info = atipci_be->gart_info;
 	unsigned long offset = atipci_be->offset;
 	int i;
 	off_t j;
@@ -392,7 +392,7 @@ static struct drm_ttm_backend_func ati_pcigart_ttm_backend =
 	.destroy = ati_pcigart_destroy_ttm,
 };
-struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
+struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
 {
 	ati_pcigart_ttm_backend_t *atipci_be;

View File

@@ -85,6 +85,7 @@
 #include "drm_hashtab.h"
 #include "drm_internal.h"
+struct drm_device;
 struct drm_file;
 /* If you want the memory alloc debug functionality, change define below */
@@ -162,6 +163,12 @@ struct drm_file;
 #define DRM_OBJECT_HASH_ORDER 12
 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+/*
+ * This should be small enough to allow the use of kmalloc for hash tables
+ * instead of vmalloc.
+ */
+#define DRM_FILE_HASH_ORDER 8
 #define DRM_MM_INIT_MAX_PAGES 256
 /*@}*/
@@ -204,7 +211,7 @@ struct drm_file;
 #if DRM_DEBUG_CODE
 #define DRM_DEBUG(fmt, arg...) \
 	do { \
 		if ( drm_debug ) \
 			printk(KERN_DEBUG \
 			       "[" DRM_NAME ":%s] " fmt , \
 			       __FUNCTION__ , ##arg); \
@@ -278,9 +285,6 @@ do { \
 		return -EFAULT; \
 	}
-struct drm_device;
-struct drm_file;
 /**
  * Ioctl function type.
  *
@@ -396,14 +400,9 @@ struct drm_buf_entry {
 	struct drm_freelist freelist;
 };
-/*
- * This should be small enough to allow the use of kmalloc for hash tables
- * instead of vmalloc.
- */
-#define DRM_FILE_HASH_ORDER 8
 enum drm_ref_type {
-	_DRM_REF_USE=0,
+	_DRM_REF_USE = 0,
 	_DRM_REF_TYPE1,
 	_DRM_NO_REF_TYPES
 };
@@ -508,14 +507,14 @@ struct drm_agp_mem {
 /**
  * AGP data.
  *
- * \sa drm_agp_init)() and drm_device::agp.
+ * \sa drm_agp_init() and drm_device::agp.
  */
 struct drm_agp_head {
 	DRM_AGP_KERN agp_info;	/**< AGP device information */
 	struct list_head memory;
 	unsigned long mode;	/**< AGP mode */
 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
 	struct agp_bridge_data *bridge;
 #endif
 	int enabled;	/**< whether the AGP bus as been enabled */
 	int acquired;	/**< whether the AGP device has been acquired */
@@ -599,7 +598,7 @@ struct drm_vbl_sig {
 #define DRM_ATI_GART_PCIE 2
 #define DRM_ATI_GART_IGP 3
-struct ati_pcigart_info {
+struct drm_ati_pcigart_info {
 	int gart_table_location;
 	int gart_reg_if;
 	void *addr;
@@ -629,14 +628,14 @@ struct drm_driver {
 	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
 	void (*dma_ready) (struct drm_device *);
 	int (*dma_quiescent) (struct drm_device *);
-	int (*context_ctor) (struct drm_device * dev, int context);
-	int (*context_dtor) (struct drm_device * dev, int context);
-	int (*kernel_context_switch) (struct drm_device * dev, int old,
+	int (*context_ctor) (struct drm_device *dev, int context);
+	int (*context_dtor) (struct drm_device *dev, int context);
+	int (*kernel_context_switch) (struct drm_device *dev, int old,
				      int new);
-	void (*kernel_context_switch_unlock) (struct drm_device * dev);
-	int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
-	int (*vblank_wait2) (struct drm_device * dev, unsigned int *sequence);
-	int (*dri_library_name) (struct drm_device * dev, char * buf);
+	void (*kernel_context_switch_unlock) (struct drm_device *dev);
+	int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
+	int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
+	int (*dri_library_name) (struct drm_device *dev, char * buf);
 	/**
 	 * Called by \c drm_device_is_agp. Typically used to determine if a
@@ -649,22 +648,23 @@ struct drm_driver {
 	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
 	 * (return of 1), or may or may not be AGP (return of 2).
 	 */
-	int (*device_is_agp) (struct drm_device * dev);
+	int (*device_is_agp) (struct drm_device *dev);
 	/* these have to be filled in */
 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
-	void (*irq_preinstall) (struct drm_device * dev);
-	void (*irq_postinstall) (struct drm_device * dev);
-	void (*irq_uninstall) (struct drm_device * dev);
+	void (*irq_preinstall) (struct drm_device *dev);
+	void (*irq_postinstall) (struct drm_device *dev);
+	void (*irq_uninstall) (struct drm_device *dev);
 	void (*reclaim_buffers) (struct drm_device *dev,
				 struct drm_file *file_priv);
 	void (*reclaim_buffers_locked) (struct drm_device *dev,
					struct drm_file *file_priv);
 	void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
					    struct drm_file *file_priv);
-	unsigned long (*get_map_ofs) (struct drm_map * map);
-	unsigned long (*get_reg_ofs) (struct drm_device * dev);
-	void (*set_version) (struct drm_device * dev, struct drm_set_version * sv);
+	unsigned long (*get_map_ofs) (struct drm_map *map);
+	unsigned long (*get_reg_ofs) (struct drm_device *dev);
+	void (*set_version) (struct drm_device *dev,
+			     struct drm_set_version *sv);
 	/* FB routines, if present */
 	int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
@@ -857,7 +857,7 @@ typedef struct ati_pcigart_ttm_backend {
 	struct drm_ttm_backend backend;
 	int populated;
 	void (*gart_flush_fn)(struct drm_device *dev);
-	struct ati_pcigart_info *gart_info;
+	struct drm_ati_pcigart_info *gart_info;
 	unsigned long offset;
 	struct page **pages;
 	int num_pages;
@@ -1179,9 +1179,9 @@ extern int drm_sg_free(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
 /* ATI PCIGART support (ati_pcigart.h) */
-extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info);
-extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info);
-extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
+extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
+extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
 extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
				       size_t align, dma_addr_t maxaddr);
@@ -1192,7 +1192,7 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
 struct drm_sysfs_class;
 extern struct class *drm_sysfs_create(struct module *owner, char *name);
 extern void drm_sysfs_destroy(void);
-extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head * head);
+extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
 extern void drm_sysfs_device_remove(struct drm_device *dev);
 /*
@@ -1232,7 +1232,7 @@ static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
 static __inline__ int drm_device_is_agp(struct drm_device *dev)
 {
 	if ( dev->driver->device_is_agp != NULL ) {
-		int err = (*dev->driver->device_is_agp)( dev );
+		int err = (*dev->driver->device_is_agp)(dev);
 		if (err != 2) {
			return err;

View File

@@ -48,7 +48,7 @@
  * Verifies the AGP device has been initialized and acquired and fills in the
  * drm_agp_info structure with the information in drm_agp_head::agp_info.
  */
-int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
 {
 	DRM_AGP_KERN *kern;
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(drm_agp_acquire);
 int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
 {
-	return drm_agp_acquire( (struct drm_device *) file_priv->head->dev );
+	return drm_agp_acquire((struct drm_device *) file_priv->head->dev);
 }
 /**
@@ -426,7 +426,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 		if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
 			drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
 			return NULL;
 		}
 		agp_copy_info(head->bridge, &head->agp_info);
 		agp_backend_release(head->bridge);
 	} else {

View File

@@ -83,7 +83,7 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
 		return -ENOMEM;
 	memset(entry, 0, sizeof(*entry));
 	entry->priv = priv;
-	entry->hash_item.key = (unsigned long) magic;
+	entry->hash_item.key = (unsigned long)magic;
 	mutex_lock(&dev->struct_mutex);
 	drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
 	list_add_tail(&entry->head, &dev->magicfree);
@@ -109,7 +109,7 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
 	DRM_DEBUG("%d\n", magic);
 	mutex_lock(&dev->struct_mutex);
-	if (drm_ht_find_item(&dev->magiclist, (unsigned long) magic, &hash)) {
+	if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}

View File

@@ -173,12 +173,17 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
 		if (drm_core_has_MTRR(dev)) {
 			if (map->type == _DRM_FRAME_BUFFER ||
 			    (map->flags & _DRM_WRITE_COMBINING)) {
 				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
 			}
 		}
-		if (map->type == _DRM_REGISTERS)
+		if (map->type == _DRM_REGISTERS) {
 			map->handle = ioremap(map->offset, map->size);
+			if (!map->handle) {
+				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+				return -ENOMEM;
+			}
+		}
 		break;
 	case _DRM_SHM:
 		list = drm_find_matching_map(dev, map);
@@ -387,9 +392,9 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		}
 	}
-	if (!found) {
+	if (!found)
 		return -EINVAL;
-	}
 	/* List has wrapped around to the head pointer, or it's empty and we
 	 * didn't find anything.
 	 */
@@ -495,7 +500,8 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,
  *
  * Frees any pages and buffers associated with the given entry.
  */
-static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry * entry)
+static void drm_cleanup_buf_error(struct drm_device *dev,
+				  struct drm_buf_entry *entry)
 {
 	int i;
@@ -530,7 +536,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry *
 #if __OS_HAS_AGP
 /**
- * Add AGP buffers for DMA transfers
+ * Add AGP buffers for DMA transfers.
  *
  * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
@@ -540,7 +546,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry *
  * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
-int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request)
+int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_entry *entry;
@@ -710,7 +716,7 @@ int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request)
 EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
-int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
+int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
 {
 	struct drm_device_dma *dma = dev->dma;
 	int count;
@@ -936,7 +942,7 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
 }
 EXPORT_SYMBOL(drm_addbufs_pci);
-static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request)
+static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_entry *entry;

View File

@@ -43,7 +43,7 @@
  *
  * Allocate and initialize a drm_device_dma structure.
  */
-int drm_dma_setup(struct drm_device * dev)
+int drm_dma_setup(struct drm_device *dev)
 {
 	int i;
@@ -67,7 +67,7 @@ int drm_dma_setup(struct drm_device * dev)
  * Free all pages associated with DMA buffers, the buffers and pages lists, and
  * finally the drm_device::dma structure itself.
  */
-void drm_dma_takedown(struct drm_device * dev)
+void drm_dma_takedown(struct drm_device *dev)
 {
 	struct drm_device_dma *dma = dev->dma;
 	int i, j;
@@ -129,7 +129,7 @@ void drm_dma_takedown(struct drm_device * dev)
  *
  * Resets the fields of \p buf.
  */
-void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf)
+void drm_free_buffer(struct drm_device *dev, struct drm_buf *buf)
 {
 	if (!buf)
 		return;

View File

@@ -189,8 +189,8 @@ int drm_lastclose(struct drm_device * dev)
 	if (dev->unique) {
 		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
-		dev->unique=NULL;
-		dev->unique_len=0;
+		dev->unique = NULL;
+		dev->unique_len = 0;
 	}
 	if (dev->irq_enabled)

View File

@@ -152,7 +152,7 @@ int drm_open(struct inode *inode, struct file *filp)
 		spin_unlock(&dev->count_lock);
 	}
 out:
 	mutex_lock(&dev->struct_mutex);
 	BUG_ON((dev->dev_mapping != NULL) &&
	       (dev->dev_mapping != inode->i_mapping));
@@ -236,7 +236,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	int minor = iminor(inode);
 	struct drm_file *priv;
 	int ret;
-	int i,j;
+	int i, j;
 	if (filp->f_flags & O_EXCL)
 		return -EBUSY;	/* No exclusive opens */
@@ -265,14 +265,15 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	INIT_LIST_HEAD(&priv->refd_objects);
 	INIT_LIST_HEAD(&priv->fbs);
-	for (i=0; i<_DRM_NO_REF_TYPES; ++i) {
-		ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER);
+	for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
+		ret = drm_ht_create(&priv->refd_object_hash[i],
+				    DRM_FILE_HASH_ORDER);
 		if (ret)
 			break;
 	}
 	if (ret) {
-		for(j = 0; j < i; ++j)
+		for (j = 0; j < i; ++j)
 			drm_ht_remove(&priv->refd_object_hash[j]);
 		goto out_free;
 	}
@@ -341,8 +342,9 @@ static void drm_object_release(struct file *filp)
 	/*
 	 * Free leftover ref objects created by me. Note that we cannot use
-	 * list_for_each() here, as the struct_mutex may be temporarily released
-	 * by the remove_() functions, and thus the lists may be altered.
+	 * list_for_each() here, as the struct_mutex may be temporarily
+	 * released by the remove_() functions, and thus the lists may be
+	 * altered.
 	 * Also, a drm_remove_ref_object() will not remove it
 	 * from the list unless its refcount is 1.
 	 */
@@ -354,9 +356,8 @@ static void drm_object_release(struct file *filp)
 		head = &priv->refd_objects;
 	}
-	for(i = 0; i < _DRM_NO_REF_TYPES; ++i) {
+	for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
 		drm_ht_remove(&priv->refd_object_hash[i]);
-	}
 }
 /**

View File

@@ -36,7 +36,7 @@
 #include "drm_hashtab.h"
 #include <linux/hash.h>
-int drm_ht_create(struct drm_open_hash * ht, unsigned int order)
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
 {
 	unsigned int i;
@@ -63,7 +63,7 @@ int drm_ht_create(struct drm_open_hash * ht, unsigned int order)
 	return 0;
 }
-void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key)
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
@@ -80,7 +80,7 @@ void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key)
 	}
 }
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht,
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
					  unsigned long key)
 {
 	struct drm_hash_item *entry;
@@ -100,7 +100,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht,
 	return NULL;
 }
-int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item)
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
 	struct drm_hash_item *entry;
 	struct hlist_head *h_list;
@@ -131,7 +131,8 @@ int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item)
  * Just insert an item and return any "bits" bit key that hasn't been
  * used before.
  */
-int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item,
+int drm_ht_just_insert_please(struct drm_open_hash *ht,
+			      struct drm_hash_item *item,
			      unsigned long seed, int bits, int shift,
			      unsigned long add)
 {
@@ -155,8 +156,8 @@ int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item *
 	return 0;
 }
-int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key,
-		     struct drm_hash_item ** item)
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+		     struct drm_hash_item **item)
 {
 	struct hlist_node *list;
@@ -168,7 +169,7 @@ int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key,
 	return 0;
 }
-int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key)
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 {
 	struct hlist_node *list;
@@ -181,14 +182,14 @@ int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key)
 	return -EINVAL;
 }
-int drm_ht_remove_item(struct drm_open_hash * ht, struct drm_hash_item * item)
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
 	hlist_del_init(&item->head);
 	ht->fill--;
 	return 0;
 }
-void drm_ht_remove(struct drm_open_hash * ht)
+void drm_ht_remove(struct drm_open_hash *ht)
 {
 	if (ht->table) {
 		if (ht->use_vmalloc)

View File

@@ -396,7 +396,7 @@ EXPORT_SYMBOL(drm_vbl_send_signals);
  */
 static void drm_locked_tasklet_func(unsigned long data)
 {
-	struct drm_device *dev = (struct drm_device*)data;
+	struct drm_device *dev = (struct drm_device *)data;
 	unsigned long irqflags;
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
@@ -433,7 +433,7 @@ static void drm_locked_tasklet_func(unsigned long data)
  * context, it must not make any assumptions about this. Also, the HW lock will
  * be held with the kernel context or any client context.
  */
-void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device*))
+void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
 {
 	unsigned long irqflags;
 	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);

View File

@@ -45,13 +45,13 @@ static struct {
 	.lock = SPIN_LOCK_UNLOCKED
 };
-static inline size_t drm_size_align(size_t size) {
+static inline size_t drm_size_align(size_t size)
+{
 	size_t tmpSize = 4;
 	if (size > PAGE_SIZE)
 		return PAGE_ALIGN(size);
-	while(tmpSize < size)
+	while (tmpSize < size)
 		tmpSize <<= 1;
 	return (size_t) tmpSize;

View File

@@ -42,7 +42,6 @@
  * drm_memory.h.
  */
-/* Need the 4-argument version of vmap(). */
 #if __OS_HAS_AGP
 #include <linux/vmalloc.h>

View File

@@ -123,7 +123,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
  *
  * This function is for internal use in the Linux-specific DRM core code.
  */
-void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)
+void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
 {
 	unsigned long addr;
 	size_t sz;
@@ -167,7 +167,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)
 /**
  * \brief Free a PCI consistent memory block
  */
-void drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)
+void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
 {
 	__drm_pci_free(dev, dmah);
 	kfree(dmah);

View File

@@ -239,10 +239,10 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
 		else
 			type = types[map->type];
 		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
			       i,
			       map->offset,
			       map->size, type, map->flags,
			       (unsigned long) r_list->user_token);
 		if (map->mtrr < 0) {
 			DRM_PROC_PRINT("none\n");

View File

@@ -109,11 +109,10 @@ int drm_regs_alloc(struct drm_reg_manager *manager,
 	 */
 	return -EBUSY;
 out:
 	*reg = entry;
 	return 0;
 }
 EXPORT_SYMBOL(drm_regs_alloc);
 void drm_regs_fence(struct drm_reg_manager *manager,
@@ -156,7 +155,6 @@ void drm_regs_fence(struct drm_reg_manager *manager,
 		}
 	}
 }
 EXPORT_SYMBOL(drm_regs_fence);
 void drm_regs_free(struct drm_reg_manager *manager)
@@ -180,7 +178,6 @@ void drm_regs_free(struct drm_reg_manager *manager)
 		manager->reg_destroy(entry);
 	}
 }
 EXPORT_SYMBOL(drm_regs_free);
 void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
@@ -188,7 +185,6 @@ void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
 	reg->fence = NULL;
 	list_add_tail(&reg->head, &manager->free);
 }
 EXPORT_SYMBOL(drm_regs_add);
 void drm_regs_init(struct drm_reg_manager *manager,
@@ -201,5 +197,4 @@ void drm_regs_init(struct drm_reg_manager *manager,
 	manager->reg_reusable = reg_reusable;
 	manager->reg_destroy = reg_destroy;
 }
 EXPORT_SYMBOL(drm_regs_init);

View File

@@ -264,7 +264,8 @@ int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
 	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
 		return -EINVAL;
-	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, user_hash);
+	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+				       user_hash);
 	drm_sman_free(memblock_item);
 	return 0;
 }

View File

@@ -55,8 +55,8 @@ struct class *drm_class;
 struct proc_dir_entry *drm_proc_root;
 static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
			   const struct pci_device_id *ent,
			   struct drm_driver *driver)
 {
 	int retcode;
@@ -218,7 +218,7 @@ err_g1:
  * Try and register, if we fail to register, backout previous work.
  */
 int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		struct drm_driver *driver)
 {
 	struct drm_device *dev;
 	int ret;
@@ -318,7 +318,7 @@ int drm_put_head(struct drm_head * head)
 	drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
 	drm_sysfs_device_remove(head->dev);
-	*head = (struct drm_head){.dev = NULL};
+	*head = (struct drm_head) {.dev = NULL};
 	drm_heads[minor] = NULL;
 	return 0;

View File

@@ -239,9 +239,6 @@ struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
 }
 EXPORT_SYMBOL(drm_ttm_get_page);
 int drm_ttm_set_user(struct drm_ttm *ttm,
		     struct task_struct *tsk,
		     int write,
@@ -278,8 +275,6 @@ int drm_ttm_set_user(struct drm_ttm *ttm,
 	return 0;
 }
 int drm_ttm_populate(struct drm_ttm *ttm)
 {
 	struct page *page;

View File

@@ -263,7 +263,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 			dmah.size = map->size;
 			__drm_pci_free(dev, &dmah);
 			break;
 		case _DRM_TTM:
 			BUG_ON(1);
 			break;
 		}
@@ -632,9 +632,9 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
 		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
 			return -EAGAIN;
 		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",

View File

@@ -1271,7 +1271,7 @@ int i810_driver_dma_quiescent(struct drm_device * dev)
 }
 struct drm_ioctl_desc i810_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),

View File

@@ -45,7 +45,7 @@ typedef struct _drm_i915_batchbuffer32 {
 } drm_i915_batchbuffer32_t;
 static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
				   unsigned long arg)
 {
 	drm_i915_batchbuffer32_t batchbuffer32;
 	drm_i915_batchbuffer_t __user *batchbuffer;
@@ -81,7 +81,7 @@ typedef struct _drm_i915_cmdbuffer32 {
 } drm_i915_cmdbuffer32_t;
 static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
				 unsigned long arg)
 {
 	drm_i915_cmdbuffer32_t cmdbuffer32;
 	drm_i915_cmdbuffer_t __user *cmdbuffer;
@@ -111,7 +111,7 @@ typedef struct drm_i915_irq_emit32 {
 } drm_i915_irq_emit32_t;
 static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
				unsigned long arg)
 {
 	drm_i915_irq_emit32_t req32;
 	drm_i915_irq_emit_t __user *request;
@@ -134,7 +134,7 @@ typedef struct drm_i915_getparam32 {
 } drm_i915_getparam32_t;
 static int compat_i915_getparam(struct file *file, unsigned int cmd,
				unsigned long arg)
 {
 	drm_i915_getparam32_t req32;
 	drm_i915_getparam_t __user *request;
@@ -161,7 +161,7 @@ typedef struct drm_i915_mem_alloc32 {
 } drm_i915_mem_alloc32_t;
 static int compat_i915_alloc(struct file *file, unsigned int cmd,
			     unsigned long arg)
 {
 	drm_i915_mem_alloc32_t req32;
 	drm_i915_mem_alloc_t __user *request;

View File

@@ -142,8 +142,8 @@ static int mga_driver_device_is_agp(struct drm_device * dev)
 	 */
 	if ((pdev->device == 0x0525) && pdev->bus->self
 	    && (pdev->bus->self->vendor == 0x3388)
-	    && (pdev->bus->self->device == 0x0021) ) {
+	    && (pdev->bus->self->device == 0x0021)) {
 		return 0;
 	}

View File

@@ -143,12 +143,13 @@ nouveau_bo_evict_mask(struct drm_buffer_object *bo)
 	return 0;
 }
 /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
  * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
  */
 static int
 nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
		     struct drm_bo_mem_reg *new_mem)
 {
 	struct drm_device *dev = bo->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -195,6 +196,46 @@ nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
			  DRM_FENCE_TYPE_EXE, 0, new_mem);
 }
+/* Flip pages into the GART and move if we can. */
+static int
+nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,
+		     struct drm_bo_mem_reg *new_mem)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_bo_mem_reg tmp_mem;
+	int ret;
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
+		       DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
+	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+	if (ret)
+		return ret;
+	ret = drm_bind_ttm(bo->ttm, &tmp_mem);
+	if (ret)
+		goto out_cleanup;
+	ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem);
+	if (ret)
+		goto out_cleanup;
+	ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+out_cleanup:
+	if (tmp_mem.mm_node) {
+		mutex_lock(&dev->struct_mutex);
+		if (tmp_mem.mm_node != bo->pinned_node)
+			drm_mm_put_block(tmp_mem.mm_node);
+		tmp_mem.mm_node = NULL;
+		mutex_unlock(&dev->struct_mutex);
+	}
+	return ret;
+}
 static int
 nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
		struct drm_bo_mem_reg *new_mem)
@@ -205,14 +246,14 @@ nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
 	if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
 		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 #if 0
-		if (!nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
+		if (!nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem))
 #endif
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	}
 	else
 	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
 #if 0
-		if (nouveau_bo_move_flips(bo, evict, no_wait, new_mem))
+		if (nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem))
 #endif
 			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 	}

View File

@@ -136,7 +136,7 @@ typedef struct drm_radeon_stipple32 {
 static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
				    unsigned long arg)
 {
-	drm_radeon_stipple32_t __user *argp = (void __user *) arg;
+	drm_radeon_stipple32_t __user *argp = (void __user *)arg;
 	drm_radeon_stipple_t __user *request;
 	u32 mask;
@@ -176,7 +176,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
 	drm_radeon_tex_image32_t img32;
 	drm_radeon_tex_image_t __user *image;
-	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 		return -EFAULT;
 	if (req32.image == 0)
 		return -EINVAL;
@@ -223,7 +223,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
 	drm_radeon_vertex2_32_t req32;
 	drm_radeon_vertex2_t __user *request;
-	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 		return -EFAULT;
 	request = compat_alloc_user_space(sizeof(*request));
@@ -255,7 +255,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
 	drm_radeon_cmd_buffer32_t req32;
 	drm_radeon_cmd_buffer_t __user *request;
-	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 		return -EFAULT;
 	request = compat_alloc_user_space(sizeof(*request));
@@ -283,7 +283,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
 	drm_radeon_getparam32_t req32;
 	drm_radeon_getparam_t __user *request;
-	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 		return -EFAULT;
 	request = compat_alloc_user_space(sizeof(*request));
@@ -310,7 +310,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
 	drm_radeon_mem_alloc32_t req32;
 	drm_radeon_mem_alloc_t __user *request;
-	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 		return -EFAULT;
 	request = compat_alloc_user_space(sizeof(*request));
@@ -336,7 +336,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
 	drm_radeon_irq_emit32_t req32;
 	drm_radeon_irq_emit_t __user *request;
-	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 		return -EFAULT;
 	request = compat_alloc_user_space(sizeof(*request));
@@ -362,7 +362,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
 	drm_radeon_setparam32_t req32;
 	drm_radeon_setparam_t __user *request;
-	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 		return -EFAULT;
 	request = compat_alloc_user_space(sizeof(*request));

View File

@@ -74,7 +74,7 @@ static void sis_sman_mm_destroy(void *private)
 	;
 }
-unsigned long sis_sman_mm_offset(void *private, void *ref)
+static unsigned long sis_sman_mm_offset(void *private, void *ref)
 {
 	return ~((unsigned long)ref);
 }
@@ -119,7 +119,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 	return 0;
 }
-static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
			 void *data, int pool)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
@@ -231,8 +231,7 @@ static drm_local_map_t *sis_reg_init(struct drm_device *dev)
 	return NULL;
 }
-int
-sis_idle(struct drm_device *dev)
+int sis_idle(struct drm_device *dev)
 {
 	drm_sis_private_t *dev_priv = dev->dev_private;
 	uint32_t idle_reg;

View File

@@ -239,8 +239,10 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 		return -ENOMEM;
 	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
 	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
-			     vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE),
+	ret = get_user_pages(current, current->mm,
+			     (unsigned long)xfer->mem_addr,
+			     vsg->num_pages,
+			     (vsg->direction == DMA_FROM_DEVICE),
			     0, vsg->pages, NULL);
 	up_read(&current->mm->mmap_sem);
@@ -509,7 +511,7 @@ via_dmablit_workqueue(struct work_struct *work)
 #else
 	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
 #endif
 	struct drm_device *dev = blitq->dev;
 	unsigned long irqsave;
 	drm_via_sg_info_t *cur_sg;
 	int cur_released;

View File

@@ -64,7 +64,7 @@ struct xgi_info {
 	struct drm_map *fb_map;
 	/* look up table parameters */
-	struct ati_pcigart_info gart_info;
+	struct drm_ati_pcigart_info gart_info;
 	unsigned int lutPageSize;
 	struct drm_sman sman;

View File

@@ -647,7 +647,7 @@ struct drm_fence_arg {
 	unsigned int signaled;
 	unsigned int error;
 	unsigned int sequence;
 	unsigned int pad64;
 	uint64_t expand_pad[2]; /*Future expansion */
 };
@@ -879,7 +879,7 @@ struct drm_bo_version_arg {
 struct drm_mm_type_arg {
 	unsigned int mem_type;
 	unsigned int lock_flags;
 };
 struct drm_mm_init_arg {

View File

@@ -171,7 +171,9 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		I915_WRITE(0x02080, dev_priv->dma_status_page);
 	}
 	DRM_DEBUG("Enabled hardware status page\n");
+#ifdef I915_HAVE_BUFFER
 	mutex_init(&dev_priv->cmdbuf_mutex);
+#endif
 	return 0;
 }
@@ -393,7 +395,7 @@ static int i915_emit_box(struct drm_device * dev,
 }
 /* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */
 void i915_emit_breadcrumb(struct drm_device *dev)
@@ -1161,7 +1163,8 @@ static int i915_setparam(struct drm_device *dev, void *data,
 	switch (param->param) {
 	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-		dev_priv->use_mi_batchbuffer_start = param->value;
+		if (!IS_I965G(dev))
+			dev_priv->use_mi_batchbuffer_start = param->value;
 		break;
 	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
 		dev_priv->tex_lru_log_granularity = param->value;
@@ -1209,27 +1212,27 @@ static int i915_mmio(struct drm_device *dev, void *data,
 	base = (u8 *) dev_priv->mmio_map->handle + e->offset;
 	switch (mmio->read_write) {
 	case I915_MMIO_READ:
 		if (!(e->flag & I915_MMIO_MAY_READ))
 			return -EINVAL;
 		for (i = 0; i < e->size / 4; i++)
 			buf[i] = I915_READ(e->offset + i * 4);
 		if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
 			DRM_ERROR("DRM_COPY_TO_USER failed\n");
+			return -EFAULT;
+		}
+		break;
+	case I915_MMIO_WRITE:
+		if (!(e->flag & I915_MMIO_MAY_WRITE))
+			return -EINVAL;
+		if(DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
+			DRM_ERROR("DRM_COPY_TO_USER failed\n");
 			return -EFAULT;
 		}
-		break;
-	case I915_MMIO_WRITE:
-		if (!(e->flag & I915_MMIO_MAY_WRITE))
-			return -EINVAL;
-		if(DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
-			DRM_ERROR("DRM_COPY_TO_USER failed\n");
-			return -EFAULT;
-		}
-		for (i = 0; i < e->size / 4; i++)
-			I915_WRITE(e->offset + i * 4, buf[i]);
-		break;
+		for (i = 0; i < e->size / 4; i++)
+			I915_WRITE(e->offset + i * 4, buf[i]);
+		break;
 	}
 	return 0;
 }

View File

@@ -314,7 +314,7 @@ typedef struct drm_i915_mmio_entry {
 	unsigned int flag;
 	unsigned int offset;
 	unsigned int size;
-}drm_i915_mmio_entry_t;
+} drm_i915_mmio_entry_t;
 typedef struct drm_i915_mmio {
 	unsigned int read_write:1;
@@ -359,6 +359,7 @@ struct drm_i915_execbuffer {
 	uint64_t ops_list;
 	uint32_t num_buffers;
 	struct drm_i915_batchbuffer batch;
+	drm_context_t context; /* for lockless use in the future */
 	struct drm_fence_arg fence_arg;
 };

View File

@@ -326,10 +326,12 @@ extern int i915_move(struct drm_buffer_object *bo, int evict,
 void i915_flush_ttm(struct drm_ttm *ttm);
 #endif
+#ifdef __linux__
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 extern void intel_init_chipset_flush_compat(struct drm_device *dev);
 extern void intel_fini_chipset_flush_compat(struct drm_device *dev);
 #endif
+#endif
 /* modesetting */
@@ -1263,16 +1265,16 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define PALETTE_A 0x0a000
 #define PALETTE_B 0x0a800
-#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
-#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
-#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
-#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
-#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
-#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/
-#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
-#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
-#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I855(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (dev->pci_device == 0x2582)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2)
 #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
		       (dev)->pci_device == 0x2982 || \

View File

@@ -46,7 +46,7 @@
 #define MINIMAL_CLEANUP 0
 #define FULL_CLEANUP 1
-static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup);
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
 /* ================================================================
 * Engine control
@@ -395,7 +395,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
 int mga_driver_load(struct drm_device *dev, unsigned long flags)
 {
-	drm_mga_private_t * dev_priv;
+	drm_mga_private_t *dev_priv;
 	dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
 	if (!dev_priv)
@@ -436,10 +436,11 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
 static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
				    drm_mga_dma_bootstrap_t * dma_bs)
 {
-	drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
+	drm_mga_private_t *const dev_priv =
+		(drm_mga_private_t *)dev->dev_private;
 	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
 	int err;
 	unsigned offset;
 	const unsigned secondary_size = dma_bs->secondary_bin_count
					* dma_bs->secondary_bin_size;
 	const unsigned agp_size = (dma_bs->agp_size << 20);
@@ -481,11 +482,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 		}
 	}
 	/* Allocate and bind AGP memory. */
 	agp_req.size = agp_size;
 	agp_req.type = 0;
-	err = drm_agp_alloc( dev, & agp_req );
+	err = drm_agp_alloc(dev, &agp_req);
 	if (err) {
 		dev_priv->agp_size = 0;
 		DRM_ERROR("Unable to allocate %uMB AGP memory\n",
@@ -511,36 +511,36 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 	warp_size = PAGE_SIZE;
 	offset = 0;
-	err = drm_addmap( dev, offset, warp_size,
-			  _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
+	err = drm_addmap(dev, offset, warp_size,
+			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
 	if (err) {
 		DRM_ERROR("Unable to map WARP microcode: %d\n", err);
 		return err;
 	}
 	offset += warp_size;
-	err = drm_addmap( dev, offset, dma_bs->primary_size,
-			  _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
+	err = drm_addmap(dev, offset, dma_bs->primary_size,
+			 _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary);
 	if (err) {
 		DRM_ERROR("Unable to map primary DMA region: %d\n", err);
 		return err;
 	}
 	offset += dma_bs->primary_size;
-	err = drm_addmap( dev, offset, secondary_size,
-			  _DRM_AGP, 0, & dev->agp_buffer_map );
+	err = drm_addmap(dev, offset, secondary_size,
			 _DRM_AGP, 0, & dev->agp_buffer_map);
 	if (err) {
 		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
 		return err;
 	}
-	(void) memset( &req, 0, sizeof(req) );
+	(void)memset( &req, 0, sizeof(req) );
 	req.count = dma_bs->secondary_bin_count;
 	req.size = dma_bs->secondary_bin_size;
 	req.flags = _DRM_AGP_BUFFER;
 	req.agp_start = offset;
-	err = drm_addbufs_agp( dev, & req );
+	err = drm_addbufs_agp(dev, &req);
 	if (err) {
 		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
 		return err;
@@ -563,8 +563,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 #endif
 	offset += secondary_size;
-	err = drm_addmap( dev, offset, agp_size - offset,
-			  _DRM_AGP, 0, & dev_priv->agp_textures );
+	err = drm_addmap(dev, offset, agp_size - offset,
			 _DRM_AGP, 0, & dev_priv->agp_textures);
 	if (err) {
 		DRM_ERROR("Unable to map AGP texture region: %d\n", err);
 		return err;
@@ -606,7 +606,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
				    drm_mga_dma_bootstrap_t * dma_bs)
 {
-	drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
+	drm_mga_private_t *const dev_priv =
+		(drm_mga_private_t *) dev->dev_private;
 	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
 	unsigned int primary_size;
 	unsigned int bin_count;
@@ -639,9 +640,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
 	 * alignment of the primary or secondary DMA buffers.
 	 */
-	for ( primary_size = dma_bs->primary_size
-	    ; primary_size != 0
-	    ; primary_size >>= 1 ) {
+	for (primary_size = dma_bs->primary_size; primary_size != 0;
+	     primary_size >>= 1 ) {
 		/* The proper alignment for this mapping is 0x04 */
 		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
				 _DRM_READ_ONLY, &dev_priv->primary);
@@ -657,18 +657,17 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
 	if (dev_priv->primary->size != dma_bs->primary_size) {
 		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
			 dma_bs->primary_size,
-			 (unsigned) dev_priv->primary->size);
+			 (unsigned)dev_priv->primary->size);
 		dma_bs->primary_size = dev_priv->primary->size;
 	}
-	for ( bin_count = dma_bs->secondary_bin_count
-	    ; bin_count > 0
-	    ; bin_count-- ) {
-		(void) memset( &req, 0, sizeof(req) );
+	for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
+	     bin_count-- ) {
+		(void)memset(&req, 0, sizeof(req));
 		req.count = bin_count;
 		req.size = dma_bs->secondary_bin_size;
-		err = drm_addbufs_pci( dev, & req );
+		err = drm_addbufs_pci(dev, &req);
 		if (!err) {
 			break;
 		}
@@ -696,12 +695,12 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
 }
-static int mga_do_dma_bootstrap(struct drm_device * dev,
+static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t * dma_bs) drm_mga_dma_bootstrap_t *dma_bs)
{ {
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
int err; int err;
drm_mga_private_t * const dev_priv = drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private; (drm_mga_private_t *) dev->dev_private;
@ -710,17 +709,17 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
/* The first steps are the same for both PCI and AGP based DMA. Map /* The first steps are the same for both PCI and AGP based DMA. Map
* the cards MMIO registers and map a status page. * the cards MMIO registers and map a status page.
*/ */
err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size, err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
_DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio ); _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio);
if (err) { if (err) {
DRM_ERROR("Unable to map MMIO region: %d\n", err); DRM_ERROR("Unable to map MMIO region: %d\n", err);
return err; return err;
} }
err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM, err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
& dev_priv->status ); & dev_priv->status);
if (err) { if (err) {
DRM_ERROR("Unable to map status region: %d\n", err); DRM_ERROR("Unable to map status region: %d\n", err);
return err; return err;
@ -768,7 +767,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data,
drm_mga_dma_bootstrap_t *bootstrap = data; drm_mga_dma_bootstrap_t *bootstrap = data;
int err; int err;
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
const drm_mga_private_t * const dev_priv = const drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private; (drm_mga_private_t *) dev->dev_private;
@ -829,7 +828,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
return -EINVAL; return -EINVAL;
} }
if (! dev_priv->used_new_dma_init) { if (!dev_priv->used_new_dma_init) {
dev_priv->dma_access = MGA_PAGPXFER; dev_priv->dma_access = MGA_PAGPXFER;
dev_priv->wagp_enable = MGA_WAGP_ENABLE; dev_priv->wagp_enable = MGA_WAGP_ENABLE;
@ -855,7 +854,8 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
return -EINVAL; return -EINVAL;
} }
dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); dev->agp_buffer_map =
drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) { if (!dev->agp_buffer_map) {
DRM_ERROR("failed to find dma buffer region!\n"); DRM_ERROR("failed to find dma buffer region!\n");
return -EINVAL; return -EINVAL;
@ -898,10 +898,6 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
/* Init the primary DMA registers. /* Init the primary DMA registers.
*/ */
MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL); MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
#if 0
MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
MGA_PRIMPTREN1); /* DWGSYNC */
#endif
dev_priv->prim.start = (u8 *) dev_priv->primary->handle; dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
dev_priv->prim.end = ((u8 *) dev_priv->primary->handle dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
@ -932,7 +928,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
return 0; return 0;
} }
static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup) static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{ {
int err = 0; int err = 0;
DRM_DEBUG("\n"); DRM_DEBUG("\n");
@ -993,7 +989,8 @@ static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup)
memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
dev_priv->warp_pipe = 0; dev_priv->warp_pipe = 0;
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); memset(dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys));
if (dev_priv->head != NULL) { if (dev_priv->head != NULL) {
mga_freelist_cleanup(dev); mga_freelist_cleanup(dev);
@ -1015,7 +1012,7 @@ int mga_dma_init(struct drm_device *dev, void *data,
case MGA_INIT_DMA: case MGA_INIT_DMA:
err = mga_do_init_dma(dev, init); err = mga_do_init_dma(dev, init);
if (err) { if (err) {
(void) mga_do_cleanup_dma(dev, FULL_CLEANUP); (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
} }
return err; return err;
case MGA_CLEANUP_DMA: case MGA_CLEANUP_DMA:
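
The AGP bootstrap hunk above carves a single AGP aperture into four consecutive drm_addmap() regions (WARP microcode, primary DMA, secondary DMA buffers, textures) by accumulating an offset. A minimal standalone sketch of that offset bookkeeping, with made-up sizes, not the driver code:

    #include <stdio.h>

    /* Illustrative only: mirrors the offset accumulation in the
     * mga_do_agp_dma_bootstrap() hunk above, with hypothetical sizes. */
    int main(void)
    {
        unsigned warp_size      = 4096;           /* rounded up to PAGE_SIZE */
        unsigned primary_size   = 1 << 20;        /* primary DMA ring        */
        unsigned secondary_size = 16 * (1 << 16); /* bin_count * bin_size    */
        unsigned agp_size       = 32 << 20;       /* whole AGP aperture      */
        unsigned offset = 0;

        printf("warp      @ 0x%08x, %u bytes\n", offset, warp_size);
        offset += warp_size;
        printf("primary   @ 0x%08x, %u bytes\n", offset, primary_size);
        offset += primary_size;
        printf("secondary @ 0x%08x, %u bytes\n", offset, secondary_size);
        offset += secondary_size;
        printf("textures  @ 0x%08x, %u bytes\n", offset, agp_size - offset);
        return 0;
    }
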

View File

@ -57,7 +57,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* SOFTRAP interrupt */ /* SOFTRAP interrupt */
if (status & MGA_SOFTRAPEN) { if (status & MGA_SOFTRAPEN) {
const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
const u32 prim_end = MGA_READ(MGA_PRIMEND); const u32 prim_end = MGA_READ(MGA_PRIMEND);
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
@ -65,7 +65,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* In addition to clearing the interrupt-pending bit, we /* In addition to clearing the interrupt-pending bit, we
* have to write to MGA_PRIMEND to re-start the DMA operation. * have to write to MGA_PRIMEND to re-start the DMA operation.
*/ */
if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) { if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
MGA_WRITE(MGA_PRIMEND, prim_end); MGA_WRITE(MGA_PRIMEND, prim_end);
} }
@ -74,9 +74,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
handled = 1; handled = 1;
} }
if ( handled ) { if (handled)
return IRQ_HANDLED; return IRQ_HANDLED;
}
return IRQ_NONE; return IRQ_NONE;
} }
@ -131,7 +130,7 @@ void mga_driver_irq_postinstall(struct drm_device * dev)
{ {
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
DRM_INIT_WAITQUEUE( &dev_priv->fence_queue ); DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
/* Turn on vertical blank interrupt and soft trap interrupt. */ /* Turn on vertical blank interrupt and soft trap interrupt. */
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
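
In the SOFTRAP path above, the handler compares MGA_PRIMADDRESS and MGA_PRIMEND with the two low bits masked off; those bits carry DMA mode flags (MGA_DMA_GENERAL is or'd into MGA_PRIMADDRESS elsewhere in this commit), not address. A tiny illustrative helper, not driver code:

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative only: true when the primary DMA start and end pointers
     * refer to the same location once the low control bits are ignored,
     * i.e. no outstanding work to restart with a MGA_PRIMEND write. */
    static bool prim_dma_idle(uint32_t prim_start, uint32_t prim_end)
    {
        return (prim_start & ~0x03u) == (prim_end & ~0x03u);
    }
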

View File

@ -62,8 +62,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
} }
DMA_BLOCK(MGA_DMAPAD, 0x00000000, DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1, MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
MGA_YTOP, box->y1 * pitch, MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
MGA_YBOT, (box->y2 - 1) * pitch);
ADVANCE_DMA(); ADVANCE_DMA();
} }
@ -78,18 +77,15 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
DMA_BLOCK(MGA_DSTORG, ctx->dstorg, DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
MGA_MACCESS, ctx->maccess, MGA_MACCESS, ctx->maccess,
MGA_PLNWT, ctx->plnwt, MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
MGA_DWGCTL, ctx->dwgctl);
DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
MGA_FOGCOL, ctx->fogcolor, MGA_FOGCOL, ctx->fogcolor,
MGA_WFLAG, ctx->wflag, MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
MGA_ZORG, dev_priv->depth_offset);
DMA_BLOCK(MGA_FCOL, ctx->fcol, DMA_BLOCK(MGA_FCOL, ctx->fcol,
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
MGA_DMAPAD, 0x00000000);
ADVANCE_DMA(); ADVANCE_DMA();
} }

View File

@ -307,20 +307,29 @@ nouveau_card_init(struct drm_device *dev)
DRM_MEMORYBARRIER(); DRM_MEMORYBARRIER();
#endif #endif
#if defined(__powerpc__) #if defined(__linux__) && defined(__powerpc__)
/* if we have an OF card, copy vbios to RAMIN */ /* if we have an OF card, copy vbios to RAMIN */
dn = pci_device_to_OF_node(dev->pdev); dn = pci_device_to_OF_node(dev->pdev);
if (dn) if (dn)
{ {
int size; int size;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size); const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size);
#else
const uint32_t *bios = get_property(dn, "NVDA,BMP", &size);
#endif
if (bios) if (bios)
{ {
int i; int i;
for(i=0;i<size;i+=4) for(i=0;i<size;i+=4)
NV_WI32(i, bios[i/4]); NV_WI32(i, bios[i/4]);
DRM_INFO("OF bios successfully copied (%d bytes)\n",size);
} }
else
DRM_INFO("Unable to get the OF bios\n");
} }
else
DRM_INFO("Unable to get the OF node\n");
#endif #endif
/* Determine exact chipset we're running on */ /* Determine exact chipset we're running on */
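
The nouveau hunk above gates the device-tree lookup on the kernel version (of_get_property() on >= 2.6.22, get_property() before) and then copies the Open Firmware VBIOS into RAMIN one 32-bit word at a time. The size returned by the property lookup is in bytes while the data is read as words, which is why the loop steps by 4 and indexes bios[i/4]. A standalone sketch of that copy, with write_word() standing in for NV_WI32():

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative only: copy a byte-sized blob through a 32-bit word
     * writer, mirroring the loop in the hunk above. */
    static void copy_vbios(void (*write_word)(size_t offset, uint32_t val),
                           const uint32_t *bios, size_t size_in_bytes)
    {
        for (size_t i = 0; i < size_in_bytes; i += 4)
            write_word(i, bios[i / 4]);
    }
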

View File

@ -29,7 +29,7 @@
#define NV2A_GRCTX_SIZE (3500*4) #define NV2A_GRCTX_SIZE (3500*4)
#define NV30_31_GRCTX_SIZE (24392) #define NV30_31_GRCTX_SIZE (24392)
#define NV34_GRCTX_SIZE (22000) #define NV34_GRCTX_SIZE (18140)
#define NV35_36_GRCTX_SIZE (22396) #define NV35_36_GRCTX_SIZE (22396)
static void nv20_graph_context_init(struct drm_device *dev, static void nv20_graph_context_init(struct drm_device *dev,

View File

@ -118,7 +118,7 @@ typedef struct drm_r128_private {
drm_local_map_t *cce_ring; drm_local_map_t *cce_ring;
drm_local_map_t *ring_rptr; drm_local_map_t *ring_rptr;
drm_local_map_t *agp_textures; drm_local_map_t *agp_textures;
struct ati_pcigart_info gart_info; struct drm_ati_pcigart_info gart_info;
} drm_r128_private_t; } drm_r128_private_t;
typedef struct drm_r128_buf_priv { typedef struct drm_r128_buf_priv {

View File

@ -816,6 +816,21 @@ static const u32 R300_cp_microcode[][2] = {
{ 0000000000, 0000000000 }, { 0000000000, 0000000000 },
}; };
u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
{
return RADEON_READ(RADEON_MC_FB_LOCATION);
}
static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
{
RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
}
static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
{
RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
}
static int RADEON_READ_PLL(struct drm_device * dev, int addr) static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{ {
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
@ -1134,14 +1149,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
* always appended to the fb which is not necessarily the case * always appended to the fb which is not necessarily the case
*/ */
if (!dev_priv->new_memmap) if (!dev_priv->new_memmap)
RADEON_WRITE(RADEON_MC_FB_LOCATION, radeon_write_fb_location(dev_priv,
((dev_priv->gart_vm_start - 1) & 0xffff0000) ((dev_priv->gart_vm_start - 1) & 0xffff0000)
| (dev_priv->fb_location >> 16)); | (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP #if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) { if (dev_priv->flags & RADEON_IS_AGP) {
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_MC_AGP_LOCATION, radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 + (((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) | dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16))); (dev_priv->gart_vm_start >> 16)));
@ -1305,7 +1320,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start); RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
dev_priv->gart_size = 32*1024*1024; dev_priv->gart_size = 32*1024*1024;
RADEON_WRITE(RADEON_MC_AGP_LOCATION, radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 + (((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) | dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16))); (dev_priv->gart_vm_start >> 16)));
@ -1339,7 +1354,7 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
dev_priv->gart_vm_start + dev_priv->gart_vm_start +
dev_priv->gart_size - 1); dev_priv->gart_size - 1);
RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */ radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
RADEON_PCIE_TX_GART_EN); RADEON_PCIE_TX_GART_EN);
@ -1382,7 +1397,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
/* Turn off AGP aperture -- is this required for PCI GART? /* Turn off AGP aperture -- is this required for PCI GART?
*/ */
RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */ radeon_write_agp_location(dev_priv, 0xffffffc0);
RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
} else { } else {
RADEON_WRITE(RADEON_AIC_CNTL, RADEON_WRITE(RADEON_AIC_CNTL,
@ -1612,10 +1627,9 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev->agp_buffer_map->handle); dev->agp_buffer_map->handle);
} }
dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION) dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
& 0xffff) << 16;
dev_priv->fb_size = dev_priv->fb_size =
((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000) ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
- dev_priv->fb_location; - dev_priv->fb_location;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
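
The new radeon_read_fb_location()/radeon_write_fb_location()/radeon_write_agp_location() helpers above wrap the raw MC register accesses, and the fb_location/fb_size computation shows how MC_FB_LOCATION packs the framebuffer range: start in the low 16 bits, inclusive end in the high 16 bits, both in 64 KiB units. A standalone sketch of that decode, using a hypothetical register value:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: mirrors the fb_location / fb_size arithmetic in
     * the radeon_do_init_cp() hunk above. */
    static void decode_fb_location(uint32_t mc_fb_location,
                                   uint32_t *base, uint32_t *size)
    {
        *base = (mc_fb_location & 0xffffu) << 16;
        *size = ((mc_fb_location & 0xffff0000u) + 0x10000u) - *base;
    }

    int main(void)
    {
        uint32_t base, size;

        decode_fb_location(0x07ff0000u, &base, &size);  /* hypothetical value */
        printf("fb base 0x%08x, size 0x%08x\n", (unsigned)base, (unsigned)size);
        return 0;                                       /* base 0, size 128 MiB */
    }
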

View File

@ -665,6 +665,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_SCRATCH_OFFSET 11 #define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12 #define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */ #define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
typedef struct drm_radeon_getparam { typedef struct drm_radeon_getparam {
int param; int param;

View File

@ -297,11 +297,11 @@ typedef struct drm_radeon_private {
int irq_enabled; int irq_enabled;
struct radeon_surface surfaces[RADEON_MAX_SURFACES]; struct radeon_surface surfaces[RADEON_MAX_SURFACES];
struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES]; struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
unsigned long pcigart_offset; unsigned long pcigart_offset;
unsigned int pcigart_offset_set; unsigned int pcigart_offset_set;
struct ati_pcigart_info gart_info; struct drm_ati_pcigart_info gart_info;
u32 scratch_ages[5]; u32 scratch_ages[5];
@ -352,6 +352,7 @@ extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_fi
extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_gart_flush(struct drm_device *dev); extern void radeon_gart_flush(struct drm_device *dev);
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_freelist_reset(struct drm_device * dev); extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
@ -402,7 +403,7 @@ extern void r300_init_reg_flags(void);
extern int r300_do_cp_cmdbuf(struct drm_device *dev, extern int r300_do_cp_cmdbuf(struct drm_device *dev,
struct drm_file *file_priv, struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t* cmdbuf); drm_radeon_kcmd_buffer_t *cmdbuf);
#ifdef RADEON_HAVE_FENCE #ifdef RADEON_HAVE_FENCE

View File

@ -147,8 +147,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
} }
static int radeon_driver_vblank_do_wait(struct drm_device * dev, static int radeon_driver_vblank_do_wait(struct drm_device * dev,
unsigned int *sequence, unsigned int *sequence, int crtc)
int crtc)
{ {
drm_radeon_private_t *dev_priv = drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private; (drm_radeon_private_t *) dev->dev_private;

View File

@ -3081,6 +3081,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
case RADEON_PARAM_VBLANK_CRTC: case RADEON_PARAM_VBLANK_CRTC:
value = radeon_vblank_crtc_get(dev); value = radeon_vblank_crtc_get(dev);
break; break;
case RADEON_PARAM_FB_LOCATION:
value = radeon_read_fb_location(dev_priv);
break;
default: default:
DRM_DEBUG( "Invalid parameter %d\n", param->param ); DRM_DEBUG( "Invalid parameter %d\n", param->param );
return -EINVAL; return -EINVAL;
@ -3154,7 +3157,7 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil
* *
* DRM infrastructure takes care of reclaiming dma buffers. * DRM infrastructure takes care of reclaiming dma buffers.
*/ */
void radeon_driver_preclose(struct drm_device * dev, void radeon_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
if (dev->dev_private) { if (dev->dev_private) {
@ -3166,7 +3169,7 @@ void radeon_driver_preclose(struct drm_device * dev,
} }
} }
void radeon_driver_lastclose(struct drm_device * dev) void radeon_driver_lastclose(struct drm_device *dev)
{ {
if (dev->dev_private) { if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
@ -3179,7 +3182,7 @@ void radeon_driver_lastclose(struct drm_device * dev)
radeon_do_release(dev); radeon_do_release(dev);
} }
int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv) int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{ {
drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_driver_file_fields *radeon_priv; struct drm_radeon_driver_file_fields *radeon_priv;
@ -3201,7 +3204,7 @@ int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv)
return 0; return 0;
} }
void radeon_driver_postclose(struct drm_device * dev, struct drm_file *file_priv) void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{ {
struct drm_radeon_driver_file_fields *radeon_priv = struct drm_radeon_driver_file_fields *radeon_priv =
file_priv->driver_priv; file_priv->driver_priv;
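
The RADEON_PARAM_FB_LOCATION case above exposes the raw MC_FB_LOCATION value through the existing getparam path. A hedged user-space sketch of querying it; this assumes the drm_radeon_getparam_t layout and the DRM_IOCTL_RADEON_GETPARAM wrapper from radeon_drm.h, and an already-open DRM file descriptor:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "radeon_drm.h"   /* assumed to provide the getparam ABI */

    /* Hedged sketch, not part of this commit: read the new parameter.
     * 'fd' is an open DRM device file descriptor. */
    static int query_fb_location(int fd, uint32_t *fb_loc)
    {
        drm_radeon_getparam_t gp;
        int value = 0;

        gp.param = RADEON_PARAM_FB_LOCATION;
        gp.value = &value;                 /* kernel copies an int back here */
        if (ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp) != 0)
            return -1;
        *fb_loc = (uint32_t)value;
        return 0;
    }
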

View File

@ -364,7 +364,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
unsigned int cur = dev_priv->current_dma_page; unsigned int cur = dev_priv->current_dma_page;
unsigned int rest = SAVAGE_DMA_PAGE_SIZE - unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
dev_priv->dma_pages[cur].used; dev_priv->dma_pages[cur].used;
unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) / unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
SAVAGE_DMA_PAGE_SIZE; SAVAGE_DMA_PAGE_SIZE;
uint32_t *dma_ptr; uint32_t *dma_ptr;
unsigned int i; unsigned int i;
@ -374,7 +374,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
if (cur + nr_pages < dev_priv->nr_dma_pages) { if (cur + nr_pages < dev_priv->nr_dma_pages) {
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur*SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
if (n < rest) if (n < rest)
rest = n; rest = n;
dev_priv->dma_pages[cur].used += rest; dev_priv->dma_pages[cur].used += rest;
@ -383,7 +383,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
} else { } else {
dev_priv->dma_flush(dev_priv); dev_priv->dma_flush(dev_priv);
nr_pages = nr_pages =
(n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE; (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
for (i = cur; i < dev_priv->nr_dma_pages; ++i) { for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
dev_priv->dma_pages[i].age = dev_priv->last_dma_age; dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].used = 0;
@ -443,7 +443,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
dev_priv->dma_pages[cur].used += pad; dev_priv->dma_pages[cur].used += pad;
while(pad != 0) { while (pad != 0) {
*dma_ptr++ = BCI_CMD_WAIT; *dma_ptr++ = BCI_CMD_WAIT;
pad--; pad--;
} }
@ -587,12 +587,12 @@ int savage_driver_firstopen(struct drm_device *dev)
dev_priv->mtrr[0].handle = dev_priv->mtrr[0].handle =
drm_mtrr_add(dev_priv->mtrr[0].base, drm_mtrr_add(dev_priv->mtrr[0].base,
dev_priv->mtrr[0].size, DRM_MTRR_WC); dev_priv->mtrr[0].size, DRM_MTRR_WC);
dev_priv->mtrr[1].base = fb_base+0x02000000; dev_priv->mtrr[1].base = fb_base + 0x02000000;
dev_priv->mtrr[1].size = 0x02000000; dev_priv->mtrr[1].size = 0x02000000;
dev_priv->mtrr[1].handle = dev_priv->mtrr[1].handle =
drm_mtrr_add(dev_priv->mtrr[1].base, drm_mtrr_add(dev_priv->mtrr[1].base,
dev_priv->mtrr[1].size, DRM_MTRR_WC); dev_priv->mtrr[1].size, DRM_MTRR_WC);
dev_priv->mtrr[2].base = fb_base+0x04000000; dev_priv->mtrr[2].base = fb_base + 0x04000000;
dev_priv->mtrr[2].size = 0x04000000; dev_priv->mtrr[2].size = 0x04000000;
dev_priv->mtrr[2].handle = dev_priv->mtrr[2].handle =
drm_mtrr_add(dev_priv->mtrr[2].base, drm_mtrr_add(dev_priv->mtrr[2].base,
@ -833,7 +833,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
depth_tile_format = SAVAGE_BD_TILE_DEST; depth_tile_format = SAVAGE_BD_TILE_DEST;
} }
front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8); front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
depth_stride = depth_stride =
dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
@ -888,7 +888,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
return -ENOMEM; return -ENOMEM;
} }
if (savage_dma_init(dev_priv) < 0) { if (savage_dma_init(dev_priv) < 0) {
DRM_ERROR("could not initialize command DMA\n"); DRM_ERROR("could not initialize command DMA\n");
savage_do_cleanup_bci(dev); savage_do_cleanup_bci(dev);
return -ENOMEM; return -ENOMEM;
@ -983,7 +983,7 @@ static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_
* - event counter wrapped since the event was emitted or * - event counter wrapped since the event was emitted or
* - the hardware has advanced up to or over the event to wait for. * - the hardware has advanced up to or over the event to wait for.
*/ */
if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) ) if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
return 0; return 0;
else else
return dev_priv->wait_evnt(dev_priv, event_e); return dev_priv->wait_evnt(dev_priv, event_e);
@ -1065,8 +1065,6 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
if (!dma->buflist) if (!dma->buflist)
return; return;
/*i830_flush_queue(dev);*/
for (i = 0; i < dma->buf_count; i++) { for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i]; struct drm_buf *buf = dma->buflist[i];
drm_savage_buf_priv_t *buf_priv = buf->dev_private; drm_savage_buf_priv_t *buf_priv = buf->dev_private;
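
The savage_dma_alloc() hunks above tidy the "(n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE" expression, which is the usual integer ceiling division: how many whole DMA pages are needed once 'rest' words have been packed into the current page. A small standalone model of that arithmetic (not the driver code, which handles the n < rest case separately):

    #include <assert.h>

    /* Illustrative only: ceiling division over DMA pages. */
    static unsigned int pages_needed(unsigned int words, unsigned int rest,
                                     unsigned int page_size)
    {
        if (words <= rest)
            return 0;                      /* fits in the current page */
        return (words - rest + page_size - 1) / page_size;
    }

    int main(void)
    {
        assert(pages_needed(10, 16, 256) == 0);
        assert(pages_needed(300, 16, 256) == 2);  /* 284 words -> 2 pages */
        return 0;
    }
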

View File

@ -30,23 +30,23 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
const struct drm_clip_rect *pbox) const struct drm_clip_rect *pbox)
{ {
uint32_t scstart = dev_priv->state.s3d.new_scstart; uint32_t scstart = dev_priv->state.s3d.new_scstart;
uint32_t scend = dev_priv->state.s3d.new_scend; uint32_t scend = dev_priv->state.s3d.new_scend;
scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
((uint32_t)pbox->x1 & 0x000007ff) | ((uint32_t)pbox->x1 & 0x000007ff) |
(((uint32_t)pbox->y1 << 16) & 0x07ff0000); (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
(((uint32_t)pbox->x2-1) & 0x000007ff) | (((uint32_t)pbox->x2 - 1) & 0x000007ff) |
((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000); ((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000);
if (scstart != dev_priv->state.s3d.scstart || if (scstart != dev_priv->state.s3d.scstart ||
scend != dev_priv->state.s3d.scend) { scend != dev_priv->state.s3d.scend) {
DMA_LOCALS; DMA_LOCALS;
BEGIN_DMA(4); BEGIN_DMA(4);
DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2); DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
DMA_WRITE(scstart); DMA_WRITE(scstart);
DMA_WRITE(scend); DMA_WRITE(scend);
dev_priv->state.s3d.scstart = scstart; dev_priv->state.s3d.scstart = scstart;
dev_priv->state.s3d.scend = scend; dev_priv->state.s3d.scend = scend;
dev_priv->waiting = 1; dev_priv->waiting = 1;
DMA_COMMIT(); DMA_COMMIT();
} }
@ -61,13 +61,13 @@ void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
((uint32_t)pbox->x1 & 0x000007ff) | ((uint32_t)pbox->x1 & 0x000007ff) |
(((uint32_t)pbox->y1 << 12) & 0x00fff000); (((uint32_t)pbox->y1 << 12) & 0x00fff000);
drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
(((uint32_t)pbox->x2-1) & 0x000007ff) | (((uint32_t)pbox->x2 - 1) & 0x000007ff) |
((((uint32_t)pbox->y2-1) << 12) & 0x00fff000); ((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000);
if (drawctrl0 != dev_priv->state.s4.drawctrl0 || if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
drawctrl1 != dev_priv->state.s4.drawctrl1) { drawctrl1 != dev_priv->state.s4.drawctrl1) {
DMA_LOCALS; DMA_LOCALS;
BEGIN_DMA(4); BEGIN_DMA(4);
DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2); DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
DMA_WRITE(drawctrl0); DMA_WRITE(drawctrl0);
DMA_WRITE(drawctrl1); DMA_WRITE(drawctrl1);
@ -87,8 +87,8 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
} }
if (!(addr & 1)) { /* local */ if (!(addr & 1)) { /* local */
addr &= ~7; addr &= ~7;
if (addr < dev_priv->texture_offset || if (addr < dev_priv->texture_offset ||
addr >= dev_priv->texture_offset+dev_priv->texture_size) { addr >= dev_priv->texture_offset + dev_priv->texture_size) {
DRM_ERROR DRM_ERROR
("bad texAddr%d %08x (local addr out of range)\n", ("bad texAddr%d %08x (local addr out of range)\n",
unit, addr); unit, addr);
@ -114,10 +114,10 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
} }
#define SAVE_STATE(reg,where) \ #define SAVE_STATE(reg,where) \
if(start <= reg && start+count > reg) \ if(start <= reg && start + count > reg) \
dev_priv->state.where = regs[reg - start] dev_priv->state.where = regs[reg - start]
#define SAVE_STATE_MASK(reg,where,mask) do { \ #define SAVE_STATE_MASK(reg,where,mask) do { \
if(start <= reg && start+count > reg) { \ if(start <= reg && start + count > reg) { \
uint32_t tmp; \ uint32_t tmp; \
tmp = regs[reg - start]; \ tmp = regs[reg - start]; \
dev_priv->state.where = (tmp & (mask)) | \ dev_priv->state.where = (tmp & (mask)) | \
@ -129,9 +129,9 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
const uint32_t *regs) const uint32_t *regs)
{ {
if (start < SAVAGE_TEXPALADDR_S3D || if (start < SAVAGE_TEXPALADDR_S3D ||
start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
start, start+count-1); start, start + count - 1);
return -EINVAL; return -EINVAL;
} }
@ -142,7 +142,7 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
/* if any texture regs were changed ... */ /* if any texture regs were changed ... */
if (start <= SAVAGE_TEXCTRL_S3D && if (start <= SAVAGE_TEXCTRL_S3D &&
start+count > SAVAGE_TEXPALADDR_S3D) { start + count > SAVAGE_TEXPALADDR_S3D) {
/* ... check texture state */ /* ... check texture state */
SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl); SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
@ -161,9 +161,9 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
int ret = 0; int ret = 0;
if (start < SAVAGE_DRAWLOCALCTRL_S4 || if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) { start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
start, start+count-1); start, start + count - 1);
return -EINVAL; return -EINVAL;
} }
@ -212,14 +212,14 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
return ret; return ret;
/* scissor regs are emitted in savage_dispatch_draw */ /* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_SCSTART_S3D) { if (start < SAVAGE_SCSTART_S3D) {
if (start+count > SAVAGE_SCEND_S3D+1) if (start + count > SAVAGE_SCEND_S3D + 1)
count2 = count - (SAVAGE_SCEND_S3D+1 - start); count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
if (start+count > SAVAGE_SCSTART_S3D) if (start + count > SAVAGE_SCSTART_S3D)
count = SAVAGE_SCSTART_S3D - start; count = SAVAGE_SCSTART_S3D - start;
} else if (start <= SAVAGE_SCEND_S3D) { } else if (start <= SAVAGE_SCEND_S3D) {
if (start+count > SAVAGE_SCEND_S3D+1) { if (start + count > SAVAGE_SCEND_S3D + 1) {
count -= SAVAGE_SCEND_S3D+1 - start; count -= SAVAGE_SCEND_S3D + 1 - start;
start = SAVAGE_SCEND_S3D+1; start = SAVAGE_SCEND_S3D + 1;
} else } else
return 0; return 0;
} }
@ -229,24 +229,24 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
return ret; return ret;
/* scissor regs are emitted in savage_dispatch_draw */ /* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_DRAWCTRL0_S4) { if (start < SAVAGE_DRAWCTRL0_S4) {
if (start+count > SAVAGE_DRAWCTRL1_S4+1) if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
count2 = count - count2 = count -
(SAVAGE_DRAWCTRL1_S4 + 1 - start); (SAVAGE_DRAWCTRL1_S4 + 1 - start);
if (start+count > SAVAGE_DRAWCTRL0_S4) if (start + count > SAVAGE_DRAWCTRL0_S4)
count = SAVAGE_DRAWCTRL0_S4 - start; count = SAVAGE_DRAWCTRL0_S4 - start;
} else if (start <= SAVAGE_DRAWCTRL1_S4) { } else if (start <= SAVAGE_DRAWCTRL1_S4) {
if (start+count > SAVAGE_DRAWCTRL1_S4+1) { if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
count -= SAVAGE_DRAWCTRL1_S4+1 - start; count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
start = SAVAGE_DRAWCTRL1_S4+1; start = SAVAGE_DRAWCTRL1_S4 + 1;
} else } else
return 0; return 0;
} }
} }
bci_size = count + (count+254)/255 + count2 + (count2+254)/255; bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
if (cmd_header->state.global) { if (cmd_header->state.global) {
BEGIN_DMA(bci_size+1); BEGIN_DMA(bci_size + 1);
DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
dev_priv->waiting = 1; dev_priv->waiting = 1;
} else { } else {
@ -286,8 +286,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
BCI_LOCALS; BCI_LOCALS;
if (!dmabuf) { if (!dmabuf) {
DRM_ERROR("called without dma buffers!\n"); DRM_ERROR("called without dma buffers!\n");
return -EINVAL; return -EINVAL;
} }
if (!n) if (!n)
@ -337,9 +337,9 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
} }
} }
if (start + n > dmabuf->total/32) { if (start + n > dmabuf->total / 32) {
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
start, start + n - 1, dmabuf->total/32); start, start + n - 1, dmabuf->total / 32);
return -EINVAL; return -EINVAL;
} }
@ -374,33 +374,33 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
/* Need to reorder indices for correct flat /* Need to reorder indices for correct flat
* shading while preserving the clock sense * shading while preserving the clock sense
* for correct culling. Only on Savage3D. */ * for correct culling. Only on Savage3D. */
int reorder[3] = {-1, -1, -1}; int reorder[3] = { -1, -1, -1 };
reorder[start%3] = 2; reorder[start % 3] = 2;
BEGIN_BCI((count+1+1)/2); BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, start+2); BCI_DRAW_INDICES_S3D(count, prim, start + 2);
for (i = start+1; i+1 < start+count; i += 2) for (i = start + 1; i + 1 < start + count; i += 2)
BCI_WRITE((i + reorder[i % 3]) | BCI_WRITE((i + reorder[i % 3]) |
((i + 1 + ((i + 1 +
reorder[(i + 1) % 3]) << 16)); reorder[(i + 1) % 3]) << 16));
if (i < start+count) if (i < start + count)
BCI_WRITE(i + reorder[i%3]); BCI_WRITE(i + reorder[i % 3]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
BEGIN_BCI((count+1+1)/2); BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, start); BCI_DRAW_INDICES_S3D(count, prim, start);
for (i = start+1; i+1 < start+count; i += 2) for (i = start + 1; i + 1 < start + count; i += 2)
BCI_WRITE(i | ((i+1) << 16)); BCI_WRITE(i | ((i + 1) << 16));
if (i < start+count) if (i < start + count)
BCI_WRITE(i); BCI_WRITE(i);
} else { } else {
BEGIN_BCI((count+2+1)/2); BEGIN_BCI((count + 2 + 1) / 2);
BCI_DRAW_INDICES_S4(count, prim, skip); BCI_DRAW_INDICES_S4(count, prim, skip);
for (i = start; i+1 < start+count; i += 2) for (i = start; i + 1 < start + count; i += 2)
BCI_WRITE(i | ((i+1) << 16)); BCI_WRITE(i | ((i + 1) << 16));
if (i < start+count) if (i < start + count)
BCI_WRITE(i); BCI_WRITE(i);
} }
@ -479,9 +479,9 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
return -EINVAL; return -EINVAL;
} }
if (start + n > vb_size / (vb_stride*4)) { if (start + n > vb_size / (vb_stride * 4)) {
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
start, start + n - 1, vb_size / (vb_stride*4)); start, start + n - 1, vb_size / (vb_stride * 4));
return -EINVAL; return -EINVAL;
} }
@ -493,28 +493,28 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
/* Need to reorder vertices for correct flat /* Need to reorder vertices for correct flat
* shading while preserving the clock sense * shading while preserving the clock sense
* for correct culling. Only on Savage3D. */ * for correct culling. Only on Savage3D. */
int reorder[3] = {-1, -1, -1}; int reorder[3] = { -1, -1, -1 };
reorder[start%3] = 2; reorder[start % 3] = 2;
BEGIN_DMA(count*vtx_size+1); BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip); DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = start; i < start+count; ++i) { for (i = start; i < start + count; ++i) {
unsigned int j = i + reorder[i % 3]; unsigned int j = i + reorder[i % 3];
DMA_COPY(&vtxbuf[vb_stride*j], vtx_size); DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
} }
DMA_COMMIT(); DMA_COMMIT();
} else { } else {
BEGIN_DMA(count*vtx_size+1); BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip); DMA_DRAW_PRIMITIVE(count, prim, skip);
if (vb_stride == vtx_size) { if (vb_stride == vtx_size) {
DMA_COPY(&vtxbuf[vb_stride*start], DMA_COPY(&vtxbuf[vb_stride * start],
vtx_size*count); vtx_size * count);
} else { } else {
for (i = start; i < start+count; ++i) { for (i = start; i < start + count; ++i) {
DMA_COPY(&vtxbuf[vb_stride*i], DMA_COPY(&vtxbuf[vb_stride * i],
vtx_size); vtx_size);
} }
} }
@ -544,8 +544,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
BCI_LOCALS; BCI_LOCALS;
if (!dmabuf) { if (!dmabuf) {
DRM_ERROR("called without dma buffers!\n"); DRM_ERROR("called without dma buffers!\n");
return -EINVAL; return -EINVAL;
} }
if (!n) if (!n)
@ -623,9 +623,9 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
/* check indices */ /* check indices */
for (i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
if (idx[i] > dmabuf->total/32) { if (idx[i] > dmabuf->total / 32) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
i, idx[i], dmabuf->total/32); i, idx[i], dmabuf->total / 32);
return -EINVAL; return -EINVAL;
} }
} }
@ -634,31 +634,31 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
/* Need to reorder indices for correct flat /* Need to reorder indices for correct flat
* shading while preserving the clock sense * shading while preserving the clock sense
* for correct culling. Only on Savage3D. */ * for correct culling. Only on Savage3D. */
int reorder[3] = {2, -1, -1}; int reorder[3] = { 2, -1, -1 };
BEGIN_BCI((count+1+1)/2); BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, idx[2]); BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
for (i = 1; i+1 < count; i += 2) for (i = 1; i + 1 < count; i += 2)
BCI_WRITE(idx[i + reorder[i % 3]] | BCI_WRITE(idx[i + reorder[i % 3]] |
(idx[i + 1 + (idx[i + 1 +
reorder[(i + 1) % 3]] << 16)); reorder[(i + 1) % 3]] << 16));
if (i < count) if (i < count)
BCI_WRITE(idx[i + reorder[i%3]]); BCI_WRITE(idx[i + reorder[i % 3]]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
BEGIN_BCI((count+1+1)/2); BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, idx[0]); BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
for (i = 1; i+1 < count; i += 2) for (i = 1; i + 1 < count; i += 2)
BCI_WRITE(idx[i] | (idx[i+1] << 16)); BCI_WRITE(idx[i] | (idx[i + 1] << 16));
if (i < count) if (i < count)
BCI_WRITE(idx[i]); BCI_WRITE(idx[i]);
} else { } else {
BEGIN_BCI((count+2+1)/2); BEGIN_BCI((count + 2 + 1) / 2);
BCI_DRAW_INDICES_S4(count, prim, skip); BCI_DRAW_INDICES_S4(count, prim, skip);
for (i = 0; i+1 < count; i += 2) for (i = 0; i + 1 < count; i += 2)
BCI_WRITE(idx[i] | (idx[i+1] << 16)); BCI_WRITE(idx[i] | (idx[i + 1] << 16));
if (i < count) if (i < count)
BCI_WRITE(idx[i]); BCI_WRITE(idx[i]);
} }
@ -743,9 +743,9 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
/* Check indices */ /* Check indices */
for (i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
if (idx[i] > vb_size / (vb_stride*4)) { if (idx[i] > vb_size / (vb_stride * 4)) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
i, idx[i], vb_size / (vb_stride*4)); i, idx[i], vb_size / (vb_stride * 4));
return -EINVAL; return -EINVAL;
} }
} }
@ -754,24 +754,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
/* Need to reorder vertices for correct flat /* Need to reorder vertices for correct flat
* shading while preserving the clock sense * shading while preserving the clock sense
* for correct culling. Only on Savage3D. */ * for correct culling. Only on Savage3D. */
int reorder[3] = {2, -1, -1}; int reorder[3] = { 2, -1, -1 };
BEGIN_DMA(count*vtx_size+1); BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip); DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
unsigned int j = idx[i + reorder[i % 3]]; unsigned int j = idx[i + reorder[i % 3]];
DMA_COPY(&vtxbuf[vb_stride*j], vtx_size); DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
} }
DMA_COMMIT(); DMA_COMMIT();
} else { } else {
BEGIN_DMA(count*vtx_size+1); BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip); DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = 0; i < count; ++i) { for (i = 0; i < count; ++i) {
unsigned int j = idx[i]; unsigned int j = idx[i];
DMA_COPY(&vtxbuf[vb_stride*j], vtx_size); DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
} }
DMA_COMMIT(); DMA_COMMIT();
@ -823,12 +823,12 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
x = boxes[i].x1, y = boxes[i].y1; x = boxes[i].x1, y = boxes[i].y1;
w = boxes[i].x2 - boxes[i].x1; w = boxes[i].x2 - boxes[i].x1;
h = boxes[i].y2 - boxes[i].y1; h = boxes[i].y2 - boxes[i].y1;
BEGIN_DMA(nbufs*6); BEGIN_DMA(nbufs * 6);
for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
if (!(flags & buf)) if (!(flags & buf))
continue; continue;
DMA_WRITE(clear_cmd); DMA_WRITE(clear_cmd);
switch(buf) { switch (buf) {
case SAVAGE_FRONT: case SAVAGE_FRONT:
DMA_WRITE(dev_priv->front_offset); DMA_WRITE(dev_priv->front_offset);
DMA_WRITE(dev_priv->front_bd); DMA_WRITE(dev_priv->front_bd);
@ -880,8 +880,8 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
DMA_WRITE(dev_priv->back_bd); DMA_WRITE(dev_priv->back_bd);
DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
DMA_WRITE(BCI_W_H(boxes[i].x2-boxes[i].x1, DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
boxes[i].y2-boxes[i].y1)); boxes[i].y2 - boxes[i].y1));
DMA_COMMIT(); DMA_COMMIT();
} }
@ -973,7 +973,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
if (cmdbuf->dma_idx > dma->buf_count) { if (cmdbuf->dma_idx > dma->buf_count) {
DRM_ERROR DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n", ("vertex buffer index %u out of range (0-%u)\n",
cmdbuf->dma_idx, dma->buf_count-1); cmdbuf->dma_idx, dma->buf_count - 1);
return -EINVAL; return -EINVAL;
} }
dmabuf = dma->buflist[cmdbuf->dma_idx]; dmabuf = dma->buflist[cmdbuf->dma_idx];
@ -1064,15 +1064,15 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
case SAVAGE_CMD_DMA_PRIM: case SAVAGE_CMD_DMA_PRIM:
case SAVAGE_CMD_VB_PRIM: case SAVAGE_CMD_VB_PRIM:
if (!first_draw_cmd) if (!first_draw_cmd)
first_draw_cmd = cmdbuf->cmd_addr-1; first_draw_cmd = cmdbuf->cmd_addr - 1;
cmdbuf->cmd_addr += j; cmdbuf->cmd_addr += j;
i += j; i += j;
break; break;
default: default:
if (first_draw_cmd) { if (first_draw_cmd) {
ret = savage_dispatch_draw ( ret = savage_dispatch_draw(
dev_priv, first_draw_cmd, dev_priv, first_draw_cmd,
cmdbuf->cmd_addr-1, cmdbuf->cmd_addr - 1,
dmabuf, cmdbuf->vb_addr, dmabuf, cmdbuf->vb_addr,
cmdbuf->vb_size, cmdbuf->vb_size,
cmdbuf->vb_stride, cmdbuf->vb_stride,
@ -1134,7 +1134,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
} }
if (first_draw_cmd) { if (first_draw_cmd) {
ret = savage_dispatch_draw ( ret = savage_dispatch_draw(
dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
cmdbuf->nbox, cmdbuf->box_addr); cmdbuf->nbox, cmdbuf->box_addr);
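
The Savage3D-only reorder[] logic that gets reformatted above rotates each triangle's indices so the provoking vertex used for flat shading changes while the winding (clock sense, hence culling) is preserved: (a, b, c) is emitted as (c, a, b). A standalone model of the index mapping, not the driver's BCI emission code:

    #include <stdio.h>

    /* Illustrative only: reorder[start % 3] = 2 sends the first vertex of
     * every triangle two slots forward; the other two slots step back by
     * one, so (a, b, c) becomes (c, a, b). */
    static unsigned rotate_index(unsigned start, unsigned i)
    {
        int reorder[3] = { -1, -1, -1 };

        reorder[start % 3] = 2;
        return i + reorder[i % 3];
    }

    int main(void)
    {
        unsigned start = 0;

        for (unsigned i = start; i < start + 6; i++)   /* two triangles */
            printf("%u ", rotate_index(start, i));
        printf("\n");                                  /* prints: 2 0 1 5 3 4 */
        return 0;
    }
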

View File

@ -84,8 +84,6 @@ extern int sis_final_context(struct drm_device * dev, int context);
#endif #endif
extern struct drm_ioctl_desc sis_ioctls[]; extern struct drm_ioctl_desc sis_ioctls[];
extern int sis_max_ioctl; extern int sis_max_ioctl;

View File

@ -1643,7 +1643,6 @@
#define HC_HAGPBpID_STOP 0x00000002 #define HC_HAGPBpID_STOP 0x00000002
#define HC_HAGPBpH_MASK 0x00ffffff #define HC_HAGPBpH_MASK 0x00ffffff
#define VIA_VIDEO_HEADER5 0xFE040000 #define VIA_VIDEO_HEADER5 0xFE040000
#define VIA_VIDEO_HEADER6 0xFE050000 #define VIA_VIDEO_HEADER6 0xFE050000
#define VIA_VIDEO_HEADER7 0xFE060000 #define VIA_VIDEO_HEADER7 0xFE060000

View File

@ -54,11 +54,11 @@
*vb++ = (w2); \ *vb++ = (w2); \
dev_priv->dma_low += 8; dev_priv->dma_low += 8;
static void via_cmdbuf_start(drm_via_private_t * dev_priv); static void via_cmdbuf_start(drm_via_private_t *dev_priv);
static void via_cmdbuf_pause(drm_via_private_t * dev_priv); static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
static void via_cmdbuf_reset(drm_via_private_t * dev_priv); static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv); static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
static int via_wait_idle(drm_via_private_t * dev_priv); static int via_wait_idle(drm_via_private_t *dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords); static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
@ -110,7 +110,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
if (count-- == 0) { if (count-- == 0) {
DRM_ERROR DRM_ERROR
("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n", ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
hw_addr, cur_addr, next_addr); hw_addr, cur_addr, next_addr);
return -1; return -1;
} }
} while ((cur_addr < hw_addr) && (next_addr >= hw_addr)); } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
@ -450,7 +450,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
static int via_wait_idle(drm_via_private_t * dev_priv) static int via_wait_idle(drm_via_private_t *dev_priv)
{ {
int count = 10000000; int count = 10000000;
@ -462,7 +462,7 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
return count; return count;
} }
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
uint32_t addr, uint32_t *cmd_addr_hi, uint32_t addr, uint32_t *cmd_addr_hi,
uint32_t *cmd_addr_lo, int skip_wait) uint32_t *cmd_addr_lo, int skip_wait)
{ {
@ -472,11 +472,12 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
uint32_t qw_pad_count; uint32_t qw_pad_count;
if (!skip_wait) if (!skip_wait)
via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE); via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
vb = via_get_dma(dev_priv); vb = via_get_dma(dev_priv);
VIA_OUT_RING_QW( HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) | VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16); (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) - qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3); ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
@ -557,8 +558,8 @@ static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
via_cmdbuf_wait(dev_priv, qwords + 2); via_cmdbuf_wait(dev_priv, qwords + 2);
vb = via_get_dma(dev_priv); vb = via_get_dma(dev_priv);
VIA_OUT_RING_QW( HC_HEADER2, HC_ParaType_NotTex << 16); VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
via_align_buffer(dev_priv,vb,qwords); via_align_buffer(dev_priv, vb, qwords);
} }
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv) static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
@ -577,7 +578,7 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
volatile uint32_t *last_pause_ptr; volatile uint32_t *last_pause_ptr;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi, via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
&jump_addr_lo, 0); &jump_addr_lo, 0);
dev_priv->dma_wrap = dev_priv->dma_low; dev_priv->dma_wrap = dev_priv->dma_low;
@ -594,16 +595,15 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
via_dummy_bitblt(dev_priv); via_dummy_bitblt(dev_priv);
via_dummy_bitblt(dev_priv); via_dummy_bitblt(dev_priv);
last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0) -1; &pause_addr_lo, 0) -1;
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0); &pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo; *last_pause_ptr = pause_addr_lo;
via_hook_segment( dev_priv, jump_addr_hi, jump_addr_lo, 0); via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
} }
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv) static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{ {
via_cmdbuf_jump(dev_priv); via_cmdbuf_jump(dev_priv);
@ -614,7 +614,7 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
uint32_t pause_addr_lo, pause_addr_hi; uint32_t pause_addr_lo, pause_addr_hi;
via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0); via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0); via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
} }
@ -653,7 +653,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
count = 1000000; count = 1000000;
tmp_size = d_siz->size; tmp_size = d_siz->size;
switch(d_siz->func) { switch (d_siz->func) {
case VIA_CMDBUF_SPACE: case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size) while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
&& count--) { && count--) {
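
The via_align_cmd() hunk above keeps the quadword padding formula: (CMDBUF_ALIGNMENT_SIZE >> 3) - ((dma_low & CMDBUF_ALIGNMENT_MASK) >> 3), i.e. the number of 8-byte quadwords that advance the write pointer to the next alignment boundary, with a fully aligned pointer getting a whole block of padding. A standalone sketch with a hypothetical alignment size (the real constant lives in the VIA driver headers):

    #include <stdint.h>
    #include <assert.h>

    #define CMDBUF_ALIGNMENT_SIZE 0x100u                      /* hypothetical */
    #define CMDBUF_ALIGNMENT_MASK (CMDBUF_ALIGNMENT_SIZE - 1) /* power of two */

    /* Illustrative only: quadwords of padding up to the next boundary. */
    static uint32_t qw_pad_count(uint32_t dma_low)
    {
        return (CMDBUF_ALIGNMENT_SIZE >> 3) -
               ((dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
    }

    int main(void)
    {
        assert(qw_pad_count(0x108) == 31);   /* 8 bytes past a boundary        */
        assert(qw_pad_count(0x100) == 32);   /* aligned -> full 256-byte block */
        return 0;
    }
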

View File

@ -228,7 +228,7 @@ typedef enum {
#define VIA_IRQ_FLAGS_MASK 0xF0000000 #define VIA_IRQ_FLAGS_MASK 0xF0000000
enum drm_via_irqs{ enum drm_via_irqs {
drm_via_irq_hqv0 = 0, drm_via_irq_hqv0 = 0,
drm_via_irq_hqv1, drm_via_irq_hqv1,
drm_via_irq_dma0_dd, drm_via_irq_dma0_dd,
@ -238,7 +238,7 @@ enum drm_via_irqs{
drm_via_irq_num drm_via_irq_num
}; };
struct drm_via_wait_irq_request{ struct drm_via_wait_irq_request {
unsigned irq; unsigned irq;
via_irq_seq_type_t type; via_irq_seq_type_t type;
uint32_t sequence; uint32_t sequence;
@ -270,9 +270,9 @@ typedef struct drm_via_dmablit {
uint32_t fb_stride; uint32_t fb_stride;
unsigned char *mem_addr; unsigned char *mem_addr;
uint32_t mem_stride; uint32_t mem_stride;
uint32_t flags; uint32_t flags;
int to_fb; int to_fb;
drm_via_blitsync_t sync; drm_via_blitsync_t sync;

View File

@ -76,8 +76,7 @@ static maskarray_t via_pro_group_a_irqs[] = {
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
}; };
static int via_num_pro_group_a = static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
sizeof(via_pro_group_a_irqs)/sizeof(maskarray_t);
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
static maskarray_t via_unichrome_irqs[] = { static maskarray_t via_unichrome_irqs[] = {
@ -86,15 +85,15 @@ static maskarray_t via_unichrome_irqs[] = {
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
}; };
static int via_num_unichrome = sizeof(via_unichrome_irqs)/sizeof(maskarray_t); static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
static unsigned time_diff(struct timeval *now,struct timeval *then) static unsigned time_diff(struct timeval *now,struct timeval *then)
{ {
return (now->tv_usec >= then->tv_usec) ? return (now->tv_usec >= then->tv_usec) ?
now->tv_usec - then->tv_usec : now->tv_usec - then->tv_usec :
1000000 - (then->tv_usec - now->tv_usec); 1000000 - (then->tv_usec - now->tv_usec);
} }
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@ -126,17 +125,17 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
} }
if (!(atomic_read(&dev->vbl_received) & 0xFF)) { if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
DRM_DEBUG("US per vblank is: %u\n", DRM_DEBUG("US per vblank is: %u\n",
dev_priv->usec_per_vblank); dev_priv->usec_per_vblank);
} }
DRM_WAKEUP(&dev->vbl_queue); DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev); drm_vbl_send_signals(dev);
handled = 1; handled = 1;
} }
for (i=0; i<dev_priv->num_irqs; ++i) { for (i = 0; i < dev_priv->num_irqs; ++i) {
if (status & cur_irq->pending_mask) { if (status & cur_irq->pending_mask) {
atomic_inc( &cur_irq->irq_received ); atomic_inc(&cur_irq->irq_received);
DRM_WAKEUP( &cur_irq->irq_queue ); DRM_WAKEUP(&cur_irq->irq_queue);
handled = 1; handled = 1;
#ifdef VIA_HAVE_DMABLIT #ifdef VIA_HAVE_DMABLIT
if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) { if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
@ -216,7 +215,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
return -EINVAL; return -EINVAL;
} }
if (irq >= drm_via_irq_num ) { if (irq >= drm_via_irq_num) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irq); irq);
return -EINVAL; return -EINVAL;
@ -278,11 +277,11 @@ void via_driver_irq_preinstall(struct drm_device * dev)
dev_priv->irq_map = via_irqmap_unichrome; dev_priv->irq_map = via_irqmap_unichrome;
} }
for(i=0; i < dev_priv->num_irqs; ++i) { for (i = 0; i < dev_priv->num_irqs; ++i) {
atomic_set(&cur_irq->irq_received, 0); atomic_set(&cur_irq->irq_received, 0);
cur_irq->enable_mask = dev_priv->irq_masks[i][0]; cur_irq->enable_mask = dev_priv->irq_masks[i][0];
cur_irq->pending_mask = dev_priv->irq_masks[i][1]; cur_irq->pending_mask = dev_priv->irq_masks[i][1];
DRM_INIT_WAITQUEUE( &cur_irq->irq_queue ); DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
dev_priv->irq_enable_mask |= cur_irq->enable_mask; dev_priv->irq_enable_mask |= cur_irq->enable_mask;
dev_priv->irq_pending_mask |= cur_irq->pending_mask; dev_priv->irq_pending_mask |= cur_irq->pending_mask;
cur_irq++; cur_irq++;
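
The via_irq.c hunks above replace the open-coded sizeof(array)/sizeof(maskarray_t) element counts with ARRAY_SIZE(). A standalone sketch of the idiom in its plain form (the kernel's version adds a compile-time check that the argument really is an array):

    #include <stdio.h>

    /* Illustrative only: element count of a statically sized array,
     * independent of the element type. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
        static const int irq_map[] = { 0, 1, -1, 2, -1, 3 };

        printf("%zu entries\n", ARRAY_SIZE(irq_map));   /* prints: 6 entries */
        return 0;
    }
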

View File

@ -249,10 +249,10 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
* Partially stolen from drm_memory.h * Partially stolen from drm_memory.h
*/ */
static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
unsigned long offset, unsigned long offset,
unsigned long size, unsigned long size,
struct drm_device * dev) struct drm_device *dev)
{ {
#ifdef __linux__ #ifdef __linux__
struct drm_map_list *r_list; struct drm_map_list *r_list;

View File

@ -33,8 +33,6 @@ typedef enum {
tex_address tex_address
} drm_via_sequence_t; } drm_via_sequence_t;
typedef struct { typedef struct {
unsigned texture; unsigned texture;
uint32_t z_addr; uint32_t z_addr;
@ -45,7 +43,7 @@ typedef struct {
uint32_t tex_level_lo[2]; uint32_t tex_level_lo[2];
uint32_t tex_level_hi[2]; uint32_t tex_level_hi[2];
uint32_t tex_palette_size[2]; uint32_t tex_palette_size[2];
uint32_t tex_npot[2]; uint32_t tex_npot[2];
drm_via_sequence_t unfinished; drm_via_sequence_t unfinished;
int agp_texture; int agp_texture;
int multitex; int multitex;
@ -56,9 +54,9 @@ typedef struct {
const uint32_t *buf_start; const uint32_t *buf_start;
} drm_via_state_t; } drm_via_state_t;
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
struct drm_device *dev, int agp); struct drm_device *dev, int agp);
extern int via_parse_command_stream(struct drm_device *dev, const uint32_t * buf, extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
unsigned int size); unsigned int size);
#endif #endif