Merge remote branch 'origin/master' into modesetting-101

Conflicts:

	linux-core/Makefile.kernel
	shared-core/i915_drv.h
main
Dave Airlie 2008-05-08 10:25:01 +10:00
commit ef204fb5c2
12 changed files with 1615 additions and 150 deletions

View File

@ -14,7 +14,8 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \ drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \ drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \
drm_edid.o drm_modes.o drm_bo_lock.o drm_regman.o drm_edid.o drm_modes.o drm_bo_lock.o drm_regman.o \
drm_vm_nopage-compat.o
tdfx-objs := tdfx_drv.o tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o

View File

@ -807,3 +807,4 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
EXPORT_SYMBOL(kmap_atomic_prot_pfn); EXPORT_SYMBOL(kmap_atomic_prot_pfn);
#endif #endif

View File

@ -367,4 +367,23 @@ extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1) #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
#endif #endif
#ifndef VM_CAN_NONLINEAR
#define DRM_VM_NOPAGE 1
#endif
#ifdef DRM_VM_NOPAGE
extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address, int *type);
extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address, int *type);
extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address, int *type);
extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address, int *type);
#endif
#endif #endif

View File

@ -81,8 +81,9 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
return tmp; return tmp;
} }
#ifndef DRM_VM_NOPAGE
/** /**
* \c nopage method for AGP virtual memory. * \c fault method for AGP virtual memory.
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
@ -92,8 +93,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
* map, get the page, increment the use count and return it. * map, get the page, increment the use count and return it.
*/ */
#if __OS_HAS_AGP #if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
struct drm_file *priv = vma->vm_file->private_data; struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev; struct drm_device *dev = priv->minor->dev;
@ -105,19 +105,24 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
* Find the right map * Find the right map
*/ */
if (!drm_core_has_AGP(dev)) if (!drm_core_has_AGP(dev))
goto vm_nopage_error; goto vm_fault_error;
if (!dev->agp || !dev->agp->cant_use_aperture) if (!dev->agp || !dev->agp->cant_use_aperture)
goto vm_nopage_error; goto vm_fault_error;
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
goto vm_nopage_error; goto vm_fault_error;
r_list = drm_hash_entry(hash, struct drm_map_list, hash); r_list = drm_hash_entry(hash, struct drm_map_list, hash);
map = r_list->map; map = r_list->map;
if (map && map->type == _DRM_AGP) { if (map && map->type == _DRM_AGP) {
unsigned long offset = address - vma->vm_start; /*
* Using vm_pgoff as a selector forces us to use this unusual
* addressing scheme.
*/
unsigned long offset = (unsigned long)vmf->virtual_address -
vma->vm_start;
unsigned long baddr = map->offset + offset; unsigned long baddr = map->offset + offset;
struct drm_agp_mem *agpmem; struct drm_agp_mem *agpmem;
struct page *page; struct page *page;
@ -139,7 +144,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
} }
if (!agpmem) if (!agpmem)
goto vm_nopage_error; goto vm_fault_error;
/* /*
* Get the page, inc the use count, and return it * Get the page, inc the use count, and return it
@ -147,25 +152,21 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
offset = (baddr - agpmem->bound) >> PAGE_SHIFT; offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
page = virt_to_page(__va(agpmem->memory->memory[offset])); page = virt_to_page(__va(agpmem->memory->memory[offset]));
get_page(page); get_page(page);
vmf->page = page;
#if 0
/* page_count() not defined everywhere */
DRM_DEBUG DRM_DEBUG
("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
baddr, __va(agpmem->memory->memory[offset]), offset, baddr, __va(agpmem->memory->memory[offset]), offset,
page_count(page)); page_count(page));
#endif return 0;
return page;
} }
vm_nopage_error: vm_fault_error:
return NOPAGE_SIGBUS; /* Disallow mremap */ return VM_FAULT_SIGBUS; /* Disallow mremap */
} }
#else /* __OS_HAS_AGP */ #else /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
return NOPAGE_SIGBUS; return VM_FAULT_SIGBUS;
} }
#endif /* __OS_HAS_AGP */ #endif /* __OS_HAS_AGP */
@ -179,29 +180,28 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
* Get the mapping, find the real physical page to map, get the page, and * Get the mapping, find the real physical page to map, get the page, and
* return it. * return it.
*/ */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
struct drm_map *map = (struct drm_map *) vma->vm_private_data; struct drm_map *map = (struct drm_map *) vma->vm_private_data;
unsigned long offset; unsigned long offset;
unsigned long i; unsigned long i;
struct page *page; struct page *page;
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map) if (!map)
return NOPAGE_SIGBUS; /* Nothing allocated */ return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start; offset = (unsigned long)vmf->virtual_address - vma->vm_start;
i = (unsigned long)map->handle + offset; i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i); page = vmalloc_to_page((void *)i);
if (!page) if (!page)
return NOPAGE_SIGBUS; return VM_FAULT_SIGBUS;
get_page(page); get_page(page);
vmf->page = page;
DRM_DEBUG("0x%lx\n", address); DRM_DEBUG("shm_fault 0x%lx\n", offset);
return page; return 0;
} }
#endif
/** /**
* \c close method for shared virtual memory. * \c close method for shared virtual memory.
@ -283,8 +283,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
} }
#ifndef DRM_VM_NOPAGE
/** /**
* \c nopage method for DMA virtual memory. * \c fault method for DMA virtual memory.
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
@ -292,8 +293,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
* *
* Determine the page number from the page offset and get it from drm_device_dma::pagelist. * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/ */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
struct drm_file *priv = vma->vm_file->private_data; struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev; struct drm_device *dev = priv->minor->dev;
@ -303,24 +303,23 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
struct page *page; struct page *page;
if (!dma) if (!dma)
return NOPAGE_SIGBUS; /* Error */ return VM_FAULT_SIGBUS; /* Error */
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!dma->pagelist) if (!dma->pagelist)
return NOPAGE_SIGBUS; /* Nothing allocated */ return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
get_page(page); get_page(page);
vmf->page = page;
DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr); DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
return page; return 0;
} }
/** /**
* \c nopage method for scatter-gather virtual memory. * \c fault method for scatter-gather virtual memory.
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
@ -328,8 +327,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
* *
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/ */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
struct drm_map *map = (struct drm_map *) vma->vm_private_data; struct drm_map *map = (struct drm_map *) vma->vm_private_data;
struct drm_file *priv = vma->vm_file->private_data; struct drm_file *priv = vma->vm_file->private_data;
@ -340,80 +338,62 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long page_offset; unsigned long page_offset;
struct page *page; struct page *page;
DRM_DEBUG("\n");
if (!entry) if (!entry)
return NOPAGE_SIGBUS; /* Error */ return VM_FAULT_SIGBUS; /* Error */
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!entry->pagelist) if (!entry->pagelist)
return NOPAGE_SIGBUS; /* Nothing allocated */ return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start; offset = (unsigned long)vmf->virtual_address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual; map_offset = map->offset - (unsigned long)dev->sg->virtual;
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset]; page = entry->pagelist[page_offset];
get_page(page); get_page(page);
vmf->page = page;
return page; return 0;
} }
#endif
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_nopage(vma, address);
}
static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_shm_nopage(vma, address);
}
static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_dma_nopage(vma, address);
}
static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_sg_nopage(vma, address);
}
/** AGP virtual memory operations */ /** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = { static struct vm_operations_struct drm_vm_ops = {
#ifdef DRM_VM_NOPAGE
.nopage = drm_vm_nopage, .nopage = drm_vm_nopage,
#else
.fault = drm_do_vm_fault,
#endif
.open = drm_vm_open, .open = drm_vm_open,
.close = drm_vm_close, .close = drm_vm_close,
}; };
/** Shared virtual memory operations */ /** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = { static struct vm_operations_struct drm_vm_shm_ops = {
#ifdef DRM_VM_NOPAGE
.nopage = drm_vm_shm_nopage, .nopage = drm_vm_shm_nopage,
#else
.fault = drm_do_vm_shm_fault,
#endif
.open = drm_vm_open, .open = drm_vm_open,
.close = drm_vm_shm_close, .close = drm_vm_shm_close,
}; };
/** DMA virtual memory operations */ /** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = { static struct vm_operations_struct drm_vm_dma_ops = {
#ifdef DRM_VM_NOPAGE
.nopage = drm_vm_dma_nopage, .nopage = drm_vm_dma_nopage,
#else
.fault = drm_do_vm_dma_fault,
#endif
.open = drm_vm_open, .open = drm_vm_open,
.close = drm_vm_close, .close = drm_vm_close,
}; };
/** Scatter-gather virtual memory operations */ /** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = { static struct vm_operations_struct drm_vm_sg_ops = {
#ifdef DRM_VM_NOPAGE
.nopage = drm_vm_sg_nopage, .nopage = drm_vm_sg_nopage,
#else
.fault = drm_do_vm_sg_fault,
#endif
.open = drm_vm_open, .open = drm_vm_open,
.close = drm_vm_close, .close = drm_vm_close,
}; };

View File

@ -0,0 +1,267 @@
/**
* \file drm_vm.c
* Memory mapping for DRM
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#ifdef DRM_VM_NOPAGE
/**
* \c nopage method for AGP virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Find the right map and if it's AGP memory find the real physical page to
* map, get the page, increment the use count and return it.
*/
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	/* Fault-in is only needed for AGP apertures the CPU cannot
	 * address directly (cant_use_aperture); otherwise bail out. */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	/* The VMA's vm_pgoff is the key into the device map hash. */
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_nopage_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/* baddr: faulting address translated into the AGP
		 * aperture's address space. */
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		/* Walk the bound AGP blocks looking for the one whose
		 * [bound, bound + pages*PAGE_SIZE) range covers baddr. */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem)
			goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		/* NOTE: offset is reused here as the page index inside
		 * the matched AGP block, no longer a byte offset. */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

#if 0
		/* page_count() not defined everywhere */
		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));
#endif

		return page;
	}
      vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
/* Stub for builds without AGP support: every fault is a bus error. */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
/**
* \c nopage method for shared virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
/**
 * \c nopage handler for shared (vmalloc-backed) mappings.
 *
 * Translates the faulting address into the map's vmalloc region and
 * returns the backing page with its use count raised.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	struct page *page;
	unsigned long kva;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	/* Kernel virtual address of the faulting byte inside the map. */
	kva = (unsigned long)map->handle + (address - vma->vm_start);
	page = vmalloc_to_page((void *)kva);
	if (!page)
		return NOPAGE_SIGBUS;

	get_page(page);
	DRM_DEBUG("0x%lx\n", address);
	return page;
}
/**
* \c nopage method for DMA virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/
/**
 * \c nopage handler for DMA buffer mappings.
 *
 * Indexes drm_device_dma::pagelist by the page number derived from the
 * fault offset and returns that page with its use count raised.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long off;
	unsigned long pgnr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	off = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	pgnr = off >> PAGE_SHIFT;
	page = virt_to_page(dma->pagelist[pgnr] + (off & ~PAGE_MASK));

	get_page(page);
	DRM_DEBUG("0x%lx (page %lu)\n", address, pgnr);
	return page;
}
/**
* \c nopage method for scatter-gather virtual memory.
*
* \param vma virtual memory area.
* \param address access address.
* \return pointer to the page structure.
*
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/
/**
 * \c nopage handler for scatter-gather mappings.
 *
 * Converts the fault offset plus the map's offset into the SG area into
 * a page index into drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long vma_off;
	unsigned long sg_off;
	unsigned long pgnr;
	struct page *page;

	DRM_DEBUG("\n");

	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	/* Page index = offset within the VMA plus the map's offset into
	 * the SG area, both expressed in whole pages. */
	vma_off = address - vma->vm_start;
	sg_off = map->offset - (unsigned long)dev->sg->virtual;
	pgnr = (vma_off >> PAGE_SHIFT) + (sg_off >> PAGE_SHIFT);

	page = entry->pagelist[pgnr];
	get_page(page);
	return page;
}
struct page *drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_nopage(vma, address);
}
struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_shm_nopage(vma, address);
}
struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_dma_nopage(vma, address);
}
struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_sg_nopage(vma, address);
}
#endif

View File

@ -176,7 +176,7 @@ static void i915_save_vga(struct drm_device *dev)
i915_write_indexed(cr_index, cr_data, 0x11, i915_write_indexed(cr_index, cr_data, 0x11,
i915_read_indexed(cr_index, cr_data, 0x11) & i915_read_indexed(cr_index, cr_data, 0x11) &
(~0x80)); (~0x80));
for (i = 0; i < 0x24; i++) for (i = 0; i <= 0x24; i++)
dev_priv->saveCR[i] = dev_priv->saveCR[i] =
i915_read_indexed(cr_index, cr_data, i); i915_read_indexed(cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */ /* Make sure we don't turn off CR group 0 writes */
@ -185,7 +185,7 @@ static void i915_save_vga(struct drm_device *dev)
/* Attribute controller registers */ /* Attribute controller registers */
inb(st01); inb(st01);
dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX); dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
for (i = 0; i < 20; i++) for (i = 0; i <= 0x14; i++)
dev_priv->saveAR[i] = i915_read_ar(st01, i, 0); dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
inb(st01); inb(st01);
outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX); outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
@ -235,7 +235,7 @@ static void i915_restore_vga(struct drm_device *dev)
/* CRT controller regs */ /* CRT controller regs */
/* Enable CR group 0 writes */ /* Enable CR group 0 writes */
i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
for (i = 0; i < 0x24; i++) for (i = 0; i <= 0x24; i++)
i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]); i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
/* Graphics controller regs */ /* Graphics controller regs */
@ -252,7 +252,7 @@ static void i915_restore_vga(struct drm_device *dev)
/* Attribute controller registers */ /* Attribute controller registers */
inb(st01); /* switch back to index mode */ inb(st01); /* switch back to index mode */
for (i = 0; i < 20; i++) for (i = 0; i <= 0x14; i++)
i915_write_ar(st01, i, dev_priv->saveAR[i], 0); i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
inb(st01); /* switch back to index mode */ inb(st01); /* switch back to index mode */
outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX); outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);

View File

@ -255,10 +255,10 @@ struct drm_i915_private {
u8 saveSR[8]; u8 saveSR[8];
u8 saveGR[25]; u8 saveGR[25];
u8 saveAR_INDEX; u8 saveAR_INDEX;
u8 saveAR[20]; u8 saveAR[21];
u8 saveDACMASK; u8 saveDACMASK;
u8 saveDACDATA[256*3]; /* 256 3-byte colors */ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[36]; u8 saveCR[37];
}; };
enum intel_chip_family { enum intel_chip_family {

View File

@ -71,8 +71,9 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->Engine; struct nouveau_engine *engine = &dev_priv->Engine;
uint32_t status; uint32_t status, reassign;
reassign = NV_READ(NV03_PFIFO_CACHES) & 1;
while ((status = NV_READ(NV03_PFIFO_INTR_0))) { while ((status = NV_READ(NV03_PFIFO_INTR_0))) {
uint32_t chid, get; uint32_t chid, get;
@ -119,9 +120,10 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
if (status) { if (status) {
DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status); DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status);
NV_WRITE(NV03_PFIFO_INTR_0, status); NV_WRITE(NV03_PFIFO_INTR_0, status);
NV_WRITE(NV03_PMC_INTR_EN_0, 0);
} }
NV_WRITE(NV03_PFIFO_CACHES, 1); NV_WRITE(NV03_PFIFO_CACHES, reassign);
} }
NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
@ -188,6 +190,39 @@ nouveau_print_bitfield_names(uint32_t value,
printk(" (unknown bits 0x%08x)", value); printk(" (unknown bits 0x%08x)", value);
} }
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
int i;
if (dev_priv->card_type < NV_40)
return dev_priv->Engine.fifo.channels;
else
if (dev_priv->card_type < NV_50)
inst = (NV_READ(0x40032c) & 0xfffff) << 4;
else
inst = NV_READ(0x40032c) & 0xfffff;
for (i = 0; i < dev_priv->Engine.fifo.channels; i++) {
struct nouveau_channel *chan = dev_priv->fifos[i];
if (!chan || !chan->ramin_grctx)
continue;
if (dev_priv->card_type < NV_50) {
if (inst == chan->ramin_grctx->instance)
break;
} else {
if (inst == INSTANCE_RD(chan->ramin_grctx->gpuobj, 0))
break;
}
}
return i;
}
static int static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{ {
@ -195,49 +230,15 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
struct nouveau_engine *engine = &dev_priv->Engine; struct nouveau_engine *engine = &dev_priv->Engine;
int channel; int channel;
if (dev_priv->card_type < NV_10) { if (dev_priv->card_type < NV_10)
channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
} else if (dev_priv->card_type < NV_40) { else
if (dev_priv->card_type < NV_40)
channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
} else else
if (dev_priv->card_type < NV_50) { channel = nouveau_graph_chid_from_grctx(dev);
uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 4;
/* 0x400704 *sometimes* contains a sensible channel ID, but if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
* mostly not.. for now lookup which channel owns the active
* PGRAPH context. Probably a better way, but this'll do
* for now.
*/
for (channel = 0; channel < 32; channel++) {
if (dev_priv->fifos[channel] == NULL)
continue;
if (cur_grctx ==
dev_priv->fifos[channel]->ramin_grctx->instance)
break;
}
if (channel == 32) {
DRM_ERROR("AIII, unable to determine active channel "
"from PGRAPH context 0x%08x\n", cur_grctx);
return -EINVAL;
}
} else {
uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 12;
for (channel = 0; channel < 128; channel++) {
if (dev_priv->fifos[channel] == NULL)
continue;
if (cur_grctx ==
dev_priv->fifos[channel]->ramin_grctx->instance)
break;
}
if (channel == 128) {
DRM_ERROR("AIII, unable to determine active channel "
"from PGRAPH context 0x%08x\n", cur_grctx);
return -EINVAL;
}
}
if (channel > engine->fifo.channels || !dev_priv->fifos[channel]) {
DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel); DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel);
return -EINVAL; return -EINVAL;
} }
@ -251,6 +252,7 @@ struct nouveau_pgraph_trap {
int class; int class;
int subc, mthd, size; int subc, mthd, size;
uint32_t data, data2; uint32_t data, data2;
uint32_t nsource, nstatus;
}; };
static void static void
@ -260,6 +262,12 @@ nouveau_graph_trap_info(struct drm_device *dev,
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t address; uint32_t address;
trap->nsource = trap->nstatus = 0;
if (dev_priv->card_type < NV_50) {
trap->nsource = NV_READ(NV03_PGRAPH_NSOURCE);
trap->nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
}
if (nouveau_graph_trapped_channel(dev, &trap->channel)) if (nouveau_graph_trapped_channel(dev, &trap->channel))
trap->channel = -1; trap->channel = -1;
address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR); address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
@ -289,10 +297,7 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
struct nouveau_pgraph_trap *trap) struct nouveau_pgraph_trap *trap)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t nsource, nstatus; uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
nsource = NV_READ(NV03_PGRAPH_NSOURCE);
nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
DRM_INFO("%s - nSource:", id); DRM_INFO("%s - nSource:", id);
nouveau_print_bitfield_names(nsource, nouveau_nsource_names, nouveau_print_bitfield_names(nsource, nouveau_nsource_names,
@ -347,6 +352,7 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
int unhandled = 0; int unhandled = 0;
nouveau_graph_trap_info(dev, &trap); nouveau_graph_trap_info(dev, &trap);
trap.nsource = nsource;
if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
if (trap.channel >= 0 && trap.mthd == 0x0150) { if (trap.channel >= 0 && trap.mthd == 0x0150) {
@ -432,6 +438,53 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
} }
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t status;
status = NV_READ(NV03_PGRAPH_INTR);
if (status & 0x00000020) {
nouveau_pgraph_intr_error(dev,
NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
status &= ~0x00000020;
NV_WRITE(NV03_PGRAPH_INTR, 0x00000020);
}
if (status & 0x00100000) {
nouveau_pgraph_intr_error(dev,
NV03_PGRAPH_NSOURCE_DATA_ERROR);
status &= ~0x00100000;
NV_WRITE(NV03_PGRAPH_INTR, 0x00100000);
}
if (status & 0x00200000) {
nouveau_pgraph_intr_error(dev,
NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
status &= ~0x00200000;
NV_WRITE(NV03_PGRAPH_INTR, 0x00200000);
}
if (status) {
DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
NV_WRITE(NV03_PGRAPH_INTR, status);
}
{
const int isb = (1 << 16) | (1 << 0);
if ((NV_READ(0x400500) & isb) != isb)
NV_WRITE(0x400500, NV_READ(0x400500) | isb);
}
NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
static void static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{ {
@ -485,7 +538,11 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
} }
if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
if (dev_priv->card_type >= NV_50)
nv50_pgraph_irq_handler(dev);
else
nouveau_pgraph_irq_handler(dev); nouveau_pgraph_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
} }

View File

@ -243,6 +243,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
break; break;
case 0x50: case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */ case 0x80: /* gotta love NVIDIA's consistency.. */
case 0x90:
engine->instmem.init = nv50_instmem_init; engine->instmem.init = nv50_instmem_init;
engine->instmem.takedown= nv50_instmem_takedown; engine->instmem.takedown= nv50_instmem_takedown;
engine->instmem.populate = nv50_instmem_populate; engine->instmem.populate = nv50_instmem_populate;

File diff suppressed because it is too large Load Diff

View File

@ -235,6 +235,7 @@ void r300_init_reg_flags(struct drm_device *dev)
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8); ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
ADD_RANGE(R500_US_CONFIG, 2);
ADD_RANGE(R500_RS_IP_0, 16); ADD_RANGE(R500_RS_IP_0, 16);
ADD_RANGE(R500_RS_INST_0, 16); ADD_RANGE(R500_RS_INST_0, 16);
} else { } else {

View File

@ -1632,6 +1632,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define R500_RS_IP_0 0x4074 #define R500_RS_IP_0 0x4074
#define R500_RS_INST_0 0x4320 #define R500_RS_INST_0 0x4320
#define R500_US_CONFIG 0x4600
#endif /* _R300_REG_H */ #endif /* _R300_REG_H */
/* *INDENT-ON* */ /* *INDENT-ON* */