Support AGP bridges where the AGP aperture can't be accessed directly by
the CPU (David Mosberger, Benjamin Herrenschmidt, myself, Paul Mackerras,
Jeff Wiedemeier)

branch: main
parent: 1d5bf7a7de
commit: e5d3c7f260
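In rough terms: DRM(ioremap), DRM(ioremap_nocache), and DRM(ioremapfree) — and the
DRM_IOREMAP* helper macros — now take the drm_device_t, so that on chipsets where the
GART does not translate CPU accesses to the AGP aperture (dev->agp->cant_use_aperture)
an AGP mapping can be built from the GART's backing pages with vmap() instead of
ioremap(). A condensed sketch of the new dispatch (adapted from the remapping helpers
added in this patch; see the full code below, this is not the verbatim hunk):

    static inline void *drm_ioremap(unsigned long offset, unsigned long size,
                                    drm_device_t *dev)
    {
    #if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
            /* CPU can't reach the aperture directly: map the GART's backing
             * pages through the kernel page tables instead. */
            if (dev->agp && dev->agp->cant_use_aperture) {
                    drm_map_t *map = drm_lookup_map(offset, size, dev);
                    if (map && map->type == _DRM_AGP)
                            return agp_remap(offset, size, dev);
            }
    #endif
            return ioremap(offset, size);   /* the normal path */
    }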
@@ -101,13 +101,13 @@ typedef struct drm_file drm_file_t;
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 
 /* Mapping helper macros */
-#define DRM_IOREMAP(map) \
+#define DRM_IOREMAP(map, dev) \
 	(map)->handle = DRM(ioremap)( dev, map )
 
-#define DRM_IOREMAP_NOCACHE(map) \
+#define DRM_IOREMAP_NOCACHE(map, dev) \
 	(map)->handle = DRM(ioremap_nocache)( dev, map )
 
-#define DRM_IOREMAPFREE(map) \
+#define DRM_IOREMAPFREE(map, dev) \
 	do { \
 		if ( (map)->handle && (map)->size ) \
 			DRM(ioremapfree)( map ); \

@@ -136,7 +136,7 @@ int DRM(addmap)( DRM_IOCTL_ARGS )
 #endif
 	}
 #endif /* __REALLY_HAVE_MTRR */
-		DRM_IOREMAP(map);
+		DRM_IOREMAP(map, dev);
 		break;
 
 	case _DRM_SHM:
@@ -251,16 +251,16 @@ static inline struct page * vmalloc_to_page(void * vmalloc_addr)
 	if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
 
 /* Mapping helper macros */
-#define DRM_IOREMAP(map) \
-	(map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
+#define DRM_IOREMAP(map, dev) \
+	(map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) )
 
-#define DRM_IOREMAP_NOCACHE(map) \
-	(map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size)
+#define DRM_IOREMAP_NOCACHE(map, dev) \
+	(map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size, (dev))
 
-#define DRM_IOREMAPFREE(map) \
-	do { \
-		if ( (map)->handle && (map)->size ) \
-			DRM(ioremapfree)( (map)->handle, (map)->size ); \
+#define DRM_IOREMAPFREE(map, dev) \
+	do { \
+		if ( (map)->handle && (map)->size ) \
+			DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \
 	} while (0)
 
 #define DRM_FIND_MAP(_map, _o) \

@@ -682,9 +682,10 @@ extern void DRM(free)(void *pt, size_t size, int area);
 extern unsigned long DRM(alloc_pages)(int order, int area);
 extern void DRM(free_pages)(unsigned long address, int order,
 			    int area);
-extern void *DRM(ioremap)(unsigned long offset, unsigned long size);
-extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size);
-extern void DRM(ioremapfree)(void *pt, unsigned long size);
+extern void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev);
+extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size,
+				  drm_device_t *dev);
+extern void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev);
 
 #if __REALLY_HAVE_AGP
 extern agp_memory *DRM(alloc_agp)(int pages, u32 type);
@@ -124,7 +124,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
 					     MTRR_TYPE_WRCOMB, 1 );
 		}
 #endif
-		map->handle = DRM(ioremap)( map->offset, map->size );
+		map->handle = DRM(ioremap)( map->offset, map->size, dev );
 		break;
 
 	case _DRM_SHM:

@@ -246,7 +246,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
 				DRM_DEBUG("mtrr_del = %d\n", retcode);
 			}
 #endif
-			DRM(ioremapfree)(map->handle, map->size);
+			DRM(ioremapfree)(map->handle, map->size, dev);
 			break;
 		case _DRM_SHM:
 			vfree(map->handle);
@@ -454,7 +454,7 @@ static int DRM(takedown)( drm_device_t *dev )
 				DRM_DEBUG( "mtrr_del=%d\n", retcode );
 			}
 #endif
-			DRM(ioremapfree)( map->handle, map->size );
+			DRM(ioremapfree)( map->handle, map->size, dev );
 			break;
 		case _DRM_SHM:
 			vfree(map->handle);
@@ -39,6 +39,159 @@
  */
 #define DEBUG_MEMORY 0
 
+/* Need the 4-argument version of vmap().  */
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+
+#include <linux/vmalloc.h>
+
+#ifdef HAVE_PAGE_AGP
+#include <asm/agp.h>
+#else
+# ifdef __powerpc__
+#  define PAGE_AGP	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# else
+#  define PAGE_AGP	PAGE_KERNEL
+# endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+# include <asm/tlbflush.h>
+#else
+# define pte_offset_kernel(dir, address)	pte_offset(dir, address)
+# define pte_pfn(pte)				(pte_page(pte) - mem_map)
+# define pfn_to_page(pfn)			(mem_map + (pfn))
+# define flush_tlb_kernel_range(s,e)		flush_tlb_all()
+#endif
+
+/*
+ * Find the drm_map that covers the range [offset, offset+size).
+ */
+static inline drm_map_t *
+drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+	struct list_head *list;
+	drm_map_list_t *r_list;
+	drm_map_t *map;
+
+	list_for_each(list, &dev->maplist->head) {
+		r_list = (drm_map_list_t *) list;
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->offset <= offset && (offset + size) <= (map->offset + map->size))
+			return map;
+	}
+	return NULL;
+}
+
+static inline void *
+agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+	unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
+	struct drm_agp_mem *agpmem;
+	struct page **page_map;
+	void *addr;
+
+	size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+	offset -= dev->hose->mem_space->start;
+#endif
+
+	for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+		if (agpmem->bound <= offset
+		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size))
+			break;
+	if (!agpmem)
+		return NULL;
+
+	/*
+	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+	 * page-table instead (that's probably faster anyhow...).
+	 */
+	/* note: use vmalloc() because num_pages could be large... */
+	page_map = vmalloc(num_pages * sizeof(struct page *));
+	if (!page_map)
+		return NULL;
+
+	phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+	for (i = 0; i < num_pages; ++i)
+		page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+	vfree(page_map);
+	if (!addr)
+		return NULL;
+
+	flush_tlb_kernel_range((unsigned long) addr, (unsigned long) addr + size);
+	return addr;
+}
+
+static inline unsigned long
+drm_follow_page (void *vaddr)
+{
+	pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
+	pmd_t *pmd = pmd_offset(pgd, (unsigned long) vaddr);
+	pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
+	return pte_pfn(*ptep) << PAGE_SHIFT;
+}
+
+#endif /* __REALLY_HAVE_AGP && defined(VMAP_4_ARGS) */
+
+static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+	if (dev->agp && dev->agp->cant_use_aperture) {
+		drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+		if (map && map->type == _DRM_AGP)
+			return agp_remap(offset, size, dev);
+	}
+#endif
+
+	return ioremap(offset, size);
+}
+
+static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
+					drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+	if (dev->agp && dev->agp->cant_use_aperture) {
+		drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+		if (map && map->type == _DRM_AGP)
+			return agp_remap(offset, size, dev);
+	}
+#endif
+
+	return ioremap_nocache(offset, size);
+}
+
+static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+	/*
+	 * This is a bit ugly.  It would be much cleaner if the DRM API would use separate
+	 * routines for handling mappings in the AGP space.  Hopefully this can be done in
+	 * a future revision of the interface...
+	 */
+	if (dev->agp && dev->agp->cant_use_aperture
+	    && ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END))
+	{
+		unsigned long offset;
+		drm_map_t *map;
+
+		offset = drm_follow_page(pt) | ((unsigned long) pt & ~PAGE_MASK);
+		map = drm_lookup_map(offset, size, dev);
+		if (map && map->type == _DRM_AGP) {
+			vunmap(pt);
+			return;
+		}
+	}
+#endif
+
+	iounmap(pt);
+}
+
 #if DEBUG_MEMORY
 #include "drm_memory_debug.h"

@@ -119,19 +272,19 @@ void DRM(free_pages)(unsigned long address, int order, int area)
 	free_pages(address, order);
 }
 
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
-	return ioremap(offset, size);
+	return drm_ioremap(offset, size, dev);
 }
 
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
-	return ioremap_nocache(offset, size);
+	return drm_ioremap_nocache(offset, size, dev);
 }
 
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
 {
-	iounmap(pt);
+	drm_ioremapfree(pt, size, dev);
 }
 
 #if __REALLY_HAVE_AGP
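Caller-side, the conversion is mechanical; a minimal sketch (hypothetical helper,
assuming the 2.4/2.5-era DRM build environment — the real driver updates follow below):

    /* Hypothetical driver snippet: map a ring buffer with the new API.
     * DRM(ioremap) now needs dev so it can detect cant_use_aperture
     * and route AGP offsets through agp_remap(). */
    static void *map_ring(drm_device_t *dev, unsigned long bus_addr,
                          unsigned long size)
    {
            return DRM(ioremap)(bus_addr, size, dev);
    }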
@@ -270,7 +270,7 @@ void DRM(free_pages)(unsigned long address, int order, int area)
 	}
 }
 
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
 	void *pt;
 
@@ -280,7 +280,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
 		return NULL;
 	}
 
-	if (!(pt = ioremap(offset, size))) {
+	if (!(pt = drm_ioremap(offset, size, dev))) {
 		spin_lock(&DRM(mem_lock));
 		++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
 		spin_unlock(&DRM(mem_lock));

@@ -293,7 +293,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
 	return pt;
 }
 
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
 	void *pt;
 
@@ -303,7 +303,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
 		return NULL;
 	}
 
-	if (!(pt = ioremap_nocache(offset, size))) {
+	if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
 		spin_lock(&DRM(mem_lock));
 		++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
 		spin_unlock(&DRM(mem_lock));

@@ -316,7 +316,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
 	return pt;
 }
 
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
 {
 	int alloc_count;
 	int free_count;

@@ -325,7 +325,7 @@ void DRM(ioremapfree)(void *pt, unsigned long size)
 		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
 			      "Attempt to free NULL pointer\n");
 	else
-		iounmap(pt);
+		drm_ioremapfree(pt, size, dev);
 
 	spin_lock(&DRM(mem_lock));
 	DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
@@ -108,12 +108,12 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
 		 * Get the page, inc the use count, and return it
 		 */
 		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
 		agpmem->memory->memory[offset] &= dev->agp->page_mask;
 		page = virt_to_page(__va(agpmem->memory->memory[offset]));
 		get_page(page);
 
-		DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
-			  baddr, __va(agpmem->memory->memory[offset]), offset);
+		DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+			  baddr, __va(agpmem->memory->memory[offset]), offset,
+			  atomic_read(&page->count));
 
 		return page;
 	}

@@ -207,7 +207,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
 					DRM_DEBUG("mtrr_del = %d\n", retcode);
 				}
 #endif
-				DRM(ioremapfree)(map->handle, map->size);
+				DRM(ioremapfree)(map->handle, map->size, dev);
 				break;
 			case _DRM_SHM:
 				vfree(map->handle);

@@ -381,7 +381,16 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 
 	if ( !priv->authenticated ) return -EACCES;
 
-	if (!VM_OFFSET(vma)) return DRM(mmap_dma)(filp, vma);
+	/* We check for "dma". On Apple's UniNorth, it's valid to have
+	 * the AGP mapped at physical address 0
+	 * --BenH.
+	 */
+	if (!VM_OFFSET(vma)
+#if __REALLY_HAVE_AGP
+	    && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+	    )
+		return DRM(mmap_dma)(filp, vma);
 
 	/* A sequential search of a linked list is
 	   fine here because: 1) there will only be

@@ -421,15 +430,19 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 
 	switch (map->type) {
 	case _DRM_AGP:
-#if defined(__alpha__)
+#if __REALLY_HAVE_AGP
+		if (dev->agp->cant_use_aperture) {
 		/*
-		 * On Alpha we can't talk to bus dma address from the
-		 * CPU, so for memory of type DRM_AGP, we'll deal with
-		 * sorting out the real physical pages and mappings
-		 * in nopage()
+		 * On some platforms we can't talk to bus dma address from the CPU, so for
+		 * memory of type DRM_AGP, we'll deal with sorting out the real physical
+		 * pages and mappings in nopage()
 		 */
+#if defined(__powerpc__)
+		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
 		vma->vm_ops = &DRM(vm_ops);
 		break;
+		}
 #endif
 		/* fall through to _DRM_FRAME_BUFFER... */
 	case _DRM_FRAME_BUFFER:

@@ -440,15 +453,15 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
 			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
 		}
-#elif defined(__ia64__)
-		if (map->type != _DRM_AGP)
-			vma->vm_page_prot =
-				pgprot_writecombine(vma->vm_page_prot);
-#elif defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
 #endif
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 	}
+#if defined(__ia64__)
+	if (map->type != _DRM_AGP)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+#endif
 	offset = DRIVER_GET_REG_OFS();
 #ifdef __sparc__
 	if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
@@ -253,7 +253,7 @@ int i810_dma_cleanup(drm_device_t *dev)
 
 		if(dev_priv->ring.virtual_start) {
 			DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-					 dev_priv->ring.Size);
+					 dev_priv->ring.Size, dev);
 		}
 		if (dev_priv->hw_status_page) {
 			pci_free_consistent(dev->pdev, PAGE_SIZE,

@@ -270,7 +270,7 @@ int i810_dma_cleanup(drm_device_t *dev)
 			drm_buf_t *buf = dma->buflist[ i ];
 			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 			if ( buf_priv->kernel_virtual && buf->total )
-				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
 		}
 	}
 	return 0;

@@ -340,7 +340,7 @@ static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
 		*buf_priv->in_use = I810_BUF_FREE;
 
 		buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-							buf->total);
+							buf->total, dev);
 	}
 	return 0;
 }

@@ -393,7 +393,7 @@ static int i810_dma_initialize(drm_device_t *dev,
 
 	dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
 						    init->ring_start,
-						    init->ring_size);
+						    init->ring_size, dev);
 
 	if (dev_priv->ring.virtual_start == NULL) {
 		dev->dev_private = (void *) dev_priv;
@@ -253,7 +253,7 @@ int i830_dma_cleanup(drm_device_t *dev)
 
 	if (dev_priv->ring.virtual_start) {
 		DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-				 dev_priv->ring.Size);
+				 dev_priv->ring.Size, dev);
 	}
 	if (dev_priv->hw_status_page) {
 		pci_free_consistent(dev->pdev, PAGE_SIZE,

@@ -271,7 +271,7 @@ int i830_dma_cleanup(drm_device_t *dev)
 			drm_buf_t *buf = dma->buflist[ i ];
 			drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 			if ( buf_priv->kernel_virtual && buf->total )
-				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
 		}
 	}
 	return 0;

@@ -347,7 +347,7 @@ static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
 		*buf_priv->in_use = I830_BUF_FREE;
 
 		buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-							buf->total);
+							buf->total, dev);
 	}
 	return 0;
 }

@@ -401,7 +401,7 @@ static int i830_dma_initialize(drm_device_t *dev,
 
 	dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
 						    init->ring_start,
-						    init->ring_size);
+						    init->ring_size, dev);
 
 	if (dev_priv->ring.virtual_start == NULL) {
 		dev->dev_private = (void *) dev_priv;
@@ -264,18 +264,36 @@ CC += -I$(DRMSRCDIR)
 # Check for Red Hat's 4-argument do_munmap().
 DOMUNMAP := $(shell grep do_munmap $(LINUXDIR)/include/linux/mm.h | \
 		grep -c acct)
-# Check for 5-argument remap_page_range() in RH9 kernel, and 2.5.x kernels
-RPR := $(shell grep remap_page_range $(LINUXDIR)/include/linux/mm.h | \
-		grep -c vma)
 
 ifneq ($(DOMUNMAP),0)
 EXTRA_CFLAGS += -DDO_MUNMAP_4_ARGS
 endif
 
+# Check for 5-argument remap_page_range() in RH9 kernel, and 2.5.x kernels
+RPR := $(shell grep remap_page_range $(LINUXDIR)/include/linux/mm.h | \
+		grep -c vma)
+
 ifneq ($(RPR),0)
 EXTRA_CFLAGS += -DREMAP_PAGE_RANGE_5_ARGS
 endif
 
+# Check for 4-argument vmap() in some 2.5.x and 2.4.x kernels
+VMAP := $(shell grep -A1 'vmap.*count,$$' $(LINUXDIR)/include/linux/vmalloc.h | \
+		grep -c prot)
+
+ifneq ($(VMAP),0)
+EXTRA_CFLAGS += -DVMAP_4_ARGS
+endif
+
+# Check for PAGE_AGP definition
+PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
+		grep -c PAGE_AGP)
+
+ifneq ($(PAGE_AGP),0)
+EXTRA_CFLAGS += -DHAVE_PAGE_AGP
+endif
+
 
 # Start with all modules turned off.
 CONFIG_DRM_GAMMA := n
 CONFIG_DRM_TDFX := n
@@ -618,7 +618,7 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
 	} else {
 		DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
 
-		DRM_IOREMAP( dev_priv->buffers );
+		DRM_IOREMAP( dev_priv->buffers, dev );
 
 		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
 		pgt = buf->address;

@@ -657,7 +657,7 @@ int gamma_do_cleanup_dma( drm_device_t *dev )
 	drm_gamma_private_t *dev_priv = dev->dev_private;
 
 	if ( dev_priv->buffers != NULL )
-		DRM_IOREMAPFREE( dev_priv->buffers );
+		DRM_IOREMAPFREE( dev_priv->buffers, dev );
 
 	DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
 		   DRM_MEM_DRIVER );
@@ -554,9 +554,9 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
 		(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
 				    init->sarea_priv_offset);
 
-	DRM_IOREMAP( dev_priv->warp );
-	DRM_IOREMAP( dev_priv->primary );
-	DRM_IOREMAP( dev_priv->buffers );
+	DRM_IOREMAP( dev_priv->warp, dev );
+	DRM_IOREMAP( dev_priv->primary, dev );
+	DRM_IOREMAP( dev_priv->buffers, dev );
 
 	if(!dev_priv->warp->handle ||
 	   !dev_priv->primary->handle ||

@@ -651,11 +651,11 @@ int mga_do_cleanup_dma( drm_device_t *dev )
 		drm_mga_private_t *dev_priv = dev->dev_private;
 
 		if ( dev_priv->warp != NULL )
-			DRM_IOREMAPFREE( dev_priv->warp );
+			DRM_IOREMAPFREE( dev_priv->warp, dev );
 		if ( dev_priv->primary != NULL )
-			DRM_IOREMAPFREE( dev_priv->primary );
+			DRM_IOREMAPFREE( dev_priv->primary, dev );
 		if ( dev_priv->buffers != NULL )
-			DRM_IOREMAPFREE( dev_priv->buffers );
+			DRM_IOREMAPFREE( dev_priv->buffers, dev );
 
 		if ( dev_priv->head != NULL ) {
 			mga_freelist_cleanup( dev );

@@ -226,7 +226,7 @@ do { \
 	if ( MGA_VERBOSE ) { \
 		DRM_INFO( "BEGIN_DMA( %d ) in %s\n", \
 			  (n), __FUNCTION__ ); \
-		DRM_INFO( "   space=0x%x req=0x%x\n", \
+		DRM_INFO( "   space=0x%x req=0x%Zx\n", \
 			  dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
 	} \
 	prim = dev_priv->prim.start; \

@@ -276,7 +276,7 @@ do { \
 #define DMA_WRITE( offset, val ) \
 do { \
 	if ( MGA_VERBOSE ) { \
-		DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04x\n", \
+		DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
 			  (u32)(val), write + (offset) * sizeof(u32) ); \
 	} \
 	*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
@@ -350,8 +350,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
 
 	R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
 		    entry->busaddr[page_ofs]);
-	DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
-		   entry->busaddr[page_ofs],
+	DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
+		   (unsigned long) entry->busaddr[page_ofs],
 		   entry->handle + tmp_ofs );
 }
 

@@ -539,10 +539,11 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
 		(drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle +
 				     init->sarea_priv_offset);
 
+#if __REALLY_HAVE_AGP
 	if ( !dev_priv->is_pci ) {
-		DRM_IOREMAP( dev_priv->cce_ring );
-		DRM_IOREMAP( dev_priv->ring_rptr );
-		DRM_IOREMAP( dev_priv->buffers );
+		DRM_IOREMAP( dev_priv->cce_ring, dev );
+		DRM_IOREMAP( dev_priv->ring_rptr, dev );
+		DRM_IOREMAP( dev_priv->buffers, dev );
 		if(!dev_priv->cce_ring->handle ||
 		   !dev_priv->ring_rptr->handle ||
 		   !dev_priv->buffers->handle) {

@@ -551,7 +552,9 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
 			r128_do_cleanup_cce( dev );
 			return DRM_ERR(ENOMEM);
 		}
-	} else {
+	} else
+#endif
+	{
 		dev_priv->cce_ring->handle =
 			(void *)dev_priv->cce_ring->offset;
 		dev_priv->ring_rptr->handle =

@@ -625,23 +628,22 @@ int r128_do_cleanup_cce( drm_device_t *dev )
 	if ( dev->dev_private ) {
 		drm_r128_private_t *dev_priv = dev->dev_private;
 
-#if __REALLY_HAVE_SG
+#if __REALLY_HAVE_AGP
 		if ( !dev_priv->is_pci ) {
 #endif
 			if ( dev_priv->cce_ring != NULL )
-				DRM_IOREMAPFREE( dev_priv->cce_ring );
+				DRM_IOREMAPFREE( dev_priv->cce_ring, dev );
 			if ( dev_priv->ring_rptr != NULL )
-				DRM_IOREMAPFREE( dev_priv->ring_rptr );
+				DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
 			if ( dev_priv->buffers != NULL )
-				DRM_IOREMAPFREE( dev_priv->buffers );
+				DRM_IOREMAPFREE( dev_priv->buffers, dev );
 #if __REALLY_HAVE_SG
-		} else {
+		} else
+#endif
+		{
 			if (!DRM(ati_pcigart_cleanup)( dev,
 						       dev_priv->phys_pci_gart,
 						       dev_priv->bus_pci_gart ))
 				DRM_ERROR( "failed to cleanup PCI GART!\n" );
 		}
-#endif
 
 		DRM(free)( dev->dev_private, sizeof(drm_r128_private_t),
 			   DRM_MEM_DRIVER );
@@ -1151,10 +1151,11 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 		(drm_radeon_sarea_t *)((u8 *)dev_priv->sarea->handle +
 				       init->sarea_priv_offset);
 
+#if __REALLY_HAVE_AGP
 	if ( !dev_priv->is_pci ) {
-		DRM_IOREMAP( dev_priv->cp_ring );
-		DRM_IOREMAP( dev_priv->ring_rptr );
-		DRM_IOREMAP( dev_priv->buffers );
+		DRM_IOREMAP( dev_priv->cp_ring, dev );
+		DRM_IOREMAP( dev_priv->ring_rptr, dev );
+		DRM_IOREMAP( dev_priv->buffers, dev );
 		if(!dev_priv->cp_ring->handle ||
 		   !dev_priv->ring_rptr->handle ||
 		   !dev_priv->buffers->handle) {

@@ -1163,7 +1164,9 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 			radeon_do_cleanup_cp(dev);
 			return DRM_ERR(EINVAL);
 		}
-	} else {
+	} else
+#endif
+	{
 		dev_priv->cp_ring->handle =
 			(void *)dev_priv->cp_ring->offset;
 		dev_priv->ring_rptr->handle =

@@ -1210,7 +1213,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 
 	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
 
-#if __REALLY_HAVE_SG
 	if ( dev_priv->is_pci ) {
 		if (!DRM(ati_pcigart_init)( dev, &dev_priv->phys_pci_gart,
 					    &dev_priv->bus_pci_gart)) {

@@ -1240,15 +1242,12 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 		RADEON_WRITE( RADEON_MC_AGP_LOCATION, 0xffffffc0 ); /* ?? */
 		RADEON_WRITE( RADEON_AGP_COMMAND, 0 ); /* clear AGP_COMMAND */
 	} else {
-#endif /* __REALLY_HAVE_SG */
 		/* Turn off PCI GART
 		 */
 		tmp = RADEON_READ( RADEON_AIC_CNTL )
 		      & ~RADEON_PCIGART_TRANSLATE_EN;
 		RADEON_WRITE( RADEON_AIC_CNTL, tmp );
-#if __REALLY_HAVE_SG
 	}
-#endif /* __REALLY_HAVE_SG */
 
 	radeon_cp_load_microcode( dev_priv );
 	radeon_cp_init_ring_buffer( dev, dev_priv );

@@ -1277,20 +1276,21 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
 	if ( dev->dev_private ) {
 		drm_radeon_private_t *dev_priv = dev->dev_private;
 
+#if __REALLY_HAVE_AGP
 		if ( !dev_priv->is_pci ) {
 			if ( dev_priv->cp_ring != NULL )
-				DRM_IOREMAPFREE( dev_priv->cp_ring );
+				DRM_IOREMAPFREE( dev_priv->cp_ring, dev );
 			if ( dev_priv->ring_rptr != NULL )
-				DRM_IOREMAPFREE( dev_priv->ring_rptr );
+				DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
 			if ( dev_priv->buffers != NULL )
-				DRM_IOREMAPFREE( dev_priv->buffers );
-		} else {
+				DRM_IOREMAPFREE( dev_priv->buffers, dev );
+		} else
+#endif
+		{
 #if __REALLY_HAVE_SG
 			if (!DRM(ati_pcigart_cleanup)( dev,
 						       dev_priv->phys_pci_gart,
 						       dev_priv->bus_pci_gart ))
 				DRM_ERROR( "failed to cleanup PCI GART!\n" );
+#endif /* __REALLY_HAVE_SG */
 		}
 
 		DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t),