Merge branch 'master' of git+ssh://marcheu@git.freedesktop.org/git/mesa/drm

commit 6eaa1272b4
@@ -1457,26 +1457,8 @@ extern int drm_fence_buffer_objects(drm_file_t * priv,
 					drm_fence_object_t *fence,
 					drm_fence_object_t **used_fence);
 
-/* Inline replacements for DRM_IOREMAP macros */
-static __inline__ void drm_core_ioremap(struct drm_map *map,
-					struct drm_device *dev)
-{
-	map->handle = drm_ioremap(map->offset, map->size, dev);
-}
-
-static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
-						struct drm_device *dev)
-{
-	map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
-}
-
-static __inline__ void drm_core_ioremapfree(struct drm_map *map,
-					    struct drm_device *dev)
-{
-	if (map->handle && map->size)
-		drm_ioremapfree(map->handle, map->size, dev);
-}
+extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
+extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
 
 static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
 						   unsigned int token)
@@ -179,7 +179,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 			}
 		}
 		if (map->type == _DRM_REGISTERS)
-			map->handle = drm_ioremap(map->offset, map->size, dev);
+			map->handle = ioremap(map->offset, map->size);
 		break;
 	case _DRM_SHM:
 		list = drm_find_matching_map(dev, map);
@@ -279,6 +279,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 
 	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
 	if (!list) {
+		if (map->type == _DRM_REGISTERS)
+			iounmap(map->handle);
 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		return -EINVAL;
 	}
@@ -295,6 +297,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 	ret = drm_map_handle(dev, &list->hash, user_token, 0);
 
 	if (ret) {
+		if (map->type == _DRM_REGISTERS)
+			iounmap(map->handle);
 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
 		mutex_unlock(&dev->struct_mutex);
@@ -402,7 +406,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 
 	switch (map->type) {
 	case _DRM_REGISTERS:
-		drm_ioremapfree(map->handle, map->size, dev);
+		iounmap(map->handle);
 		/* FALLTHROUGH */
 	case _DRM_FRAME_BUFFER:
 		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
@@ -244,3 +244,26 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 }
 #endif /* agp */
 #endif /* debug_memory */
+
+void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
+{
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		map->handle = agp_remap(map->offset, map->size, dev);
+	else
+		map->handle = ioremap(map->offset, map->size);
+}
+EXPORT_SYMBOL_GPL(drm_core_ioremap);
+
+void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
+{
+	if (!map->handle || !map->size)
+		return;
+
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		vunmap(map->handle);
+	else
+		iounmap(map->handle);
+}
+EXPORT_SYMBOL_GPL(drm_core_ioremapfree);
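Note: the driver hunks below all convert to the same calling pattern — describe the mapping in a drm_local_map_t, let the core pick the mapping primitive from map->type, and hand the same map back at teardown. A minimal sketch of that pattern, assuming only what this diff introduces (the field assignments mirror the i810/i830 hunks; bus_address and total are illustrative names, not part of the API):

	/* Sketch of the map-based interface added above. */
	drm_local_map_t map;

	map.offset = bus_address;	/* bus address to map (illustrative) */
	map.size   = total;		/* length of the mapping (illustrative) */
	map.type   = _DRM_AGP;		/* lets the core choose agp_remap() vs ioremap() */
	map.flags  = 0;
	map.mtrr   = 0;

	drm_core_ioremap(&map, dev);
	if (map.handle == NULL)		/* mapping failed */
		return DRM_ERR(ENOMEM);

	/* ... use map.handle as the kernel virtual address ... */

	drm_core_ioremapfree(&map, dev);	/* no-op if handle or size is zero */

Carrying the type in the map is what lets drm_core_ioremapfree choose vunmap() over iounmap() for agp_remap()-backed mappings; the old void *-based drm_ioremapfree had to reconstruct that information with drm_follow_page(), which is why that helper can be deleted below.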
@@ -122,19 +122,6 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
 	return addr;
 }
 
-static inline unsigned long drm_follow_page(void *vaddr)
-{
-	pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10)
-	pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr);
-#else
-	pud_t *pud = pud_offset(pgd, (unsigned long) vaddr);
-	pmd_t *pmd = pmd_offset(pud, (unsigned long) vaddr);
-#endif
-	pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
-	return pte_pfn(*ptep) << PAGE_SHIFT;
-}
-
 #else /* __OS_HAS_AGP */
 
 static inline drm_map_t *drm_lookup_map(unsigned long offset,
@@ -149,67 +136,4 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
 	return NULL;
 }
 
-static inline unsigned long drm_follow_page(void *vaddr)
-{
-	return 0;
-}
 #endif
-
-#ifndef DEBUG_MEMORY
-static inline void *drm_ioremap(unsigned long offset, unsigned long size,
-				drm_device_t * dev)
-{
-	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
-		drm_map_t *map = drm_lookup_map(offset, size, dev);
-
-		if (map && map->type == _DRM_AGP)
-			return agp_remap(offset, size, dev);
-	}
-
-	return ioremap(offset, size);
-}
-
-static inline void *drm_ioremap_nocache(unsigned long offset,
-					unsigned long size, drm_device_t * dev)
-{
-	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
-		drm_map_t *map = drm_lookup_map(offset, size, dev);
-
-		if (map && map->type == _DRM_AGP)
-			return agp_remap(offset, size, dev);
-	}
-
-	return ioremap_nocache(offset, size);
-}
-
-static inline void drm_ioremapfree(void *pt, unsigned long size,
-				   drm_device_t * dev)
-{
-	/*
-	 * This is a bit ugly. It would be much cleaner if the DRM API would use separate
-	 * routines for handling mappings in the AGP space. Hopefully this can be done in
-	 * a future revision of the interface...
-	 */
-	if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
-	    && ((unsigned long)pt >= VMALLOC_START
-		&& (unsigned long)pt < VMALLOC_END)) {
-		unsigned long offset;
-		drm_map_t *map;
-
-		offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
-		map = drm_lookup_map(offset, size, dev);
-		if (map && map->type == _DRM_AGP) {
-			vunmap(pt);
-			return;
-		}
-	}
-	iounmap(pt);
-}
-#else
-extern void *drm_ioremap(unsigned long offset, unsigned long size,
-			 drm_device_t * dev);
-extern void *drm_ioremap_nocache(unsigned long offset,
-				 unsigned long size, drm_device_t * dev);
-extern void drm_ioremapfree(void *pt, unsigned long size,
-			    drm_device_t * dev);
-#endif
@@ -289,79 +289,6 @@ void drm_free_pages(unsigned long address, int order, int area)
 	}
 }
 
-void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t * dev)
-{
-	void *pt;
-
-	if (!size) {
-		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
-			      "Mapping 0 bytes at 0x%08lx\n", offset);
-		return NULL;
-	}
-
-	if (!(pt = drm_ioremap(offset, size, dev))) {
-		spin_lock(&drm_mem_lock);
-		++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
-		spin_unlock(&drm_mem_lock);
-		return NULL;
-	}
-	spin_lock(&drm_mem_lock);
-	++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
-	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
-	spin_unlock(&drm_mem_lock);
-	return pt;
-}
-EXPORT_SYMBOL(drm_ioremap);
-
-void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
-			  drm_device_t * dev)
-{
-	void *pt;
-
-	if (!size) {
-		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
-			      "Mapping 0 bytes at 0x%08lx\n", offset);
-		return NULL;
-	}
-
-	if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
-		spin_lock(&drm_mem_lock);
-		++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
-		spin_unlock(&drm_mem_lock);
-		return NULL;
-	}
-	spin_lock(&drm_mem_lock);
-	++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
-	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
-	spin_unlock(&drm_mem_lock);
-	return pt;
-}
-EXPORT_SYMBOL(drm_ioremap_nocache);
-
-void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev)
-{
-	int alloc_count;
-	int free_count;
-
-	if (!pt)
-		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
-			      "Attempt to free NULL pointer\n");
-	else
-		drm_ioremapfree(pt, size, dev);
-
-	spin_lock(&drm_mem_lock);
-	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
-	free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
-	alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
-	spin_unlock(&drm_mem_lock);
-	if (free_count > alloc_count) {
-		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
-			      "Excess frees: %d frees, %d allocs\n",
-			      free_count, alloc_count);
-	}
-}
-EXPORT_SYMBOL(drm_ioremapfree);
-
 #if __OS_HAS_AGP
 
 DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
@@ -275,74 +275,6 @@ void drm_free_pages (unsigned long address, int order, int area) {
 	}
 }
 
-void *drm_ioremap (unsigned long offset, unsigned long size,
-		   drm_device_t * dev) {
-	void *pt;
-
-	if (!size) {
-		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
-			      "Mapping 0 bytes at 0x%08lx\n", offset);
-		return NULL;
-	}
-
-	if (!(pt = drm_ioremap(offset, size, dev))) {
-		spin_lock(&drm_mem_lock);
-		++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
-		spin_unlock(&drm_mem_lock);
-		return NULL;
-	}
-	spin_lock(&drm_mem_lock);
-	++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
-	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
-	spin_unlock(&drm_mem_lock);
-	return pt;
-}
-
-void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
-			   drm_device_t * dev) {
-	void *pt;
-
-	if (!size) {
-		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
-			      "Mapping 0 bytes at 0x%08lx\n", offset);
-		return NULL;
-	}
-
-	if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
-		spin_lock(&drm_mem_lock);
-		++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
-		spin_unlock(&drm_mem_lock);
-		return NULL;
-	}
-	spin_lock(&drm_mem_lock);
-	++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
-	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
-	spin_unlock(&drm_mem_lock);
-	return pt;
-}
-
-void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
-	int alloc_count;
-	int free_count;
-
-	if (!pt)
-		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
-			      "Attempt to free NULL pointer\n");
-	else
-		drm_ioremapfree(pt, size, dev);
-
-	spin_lock(&drm_mem_lock);
-	drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
-	free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
-	alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
-	spin_unlock(&drm_mem_lock);
-	if (free_count > alloc_count) {
-		DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
-			      "Excess frees: %d frees, %d allocs\n",
-			      free_count, alloc_count);
-	}
-}
-
 #if __OS_HAS_AGP
 
 DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
@@ -349,7 +349,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 						     map->size);
 				DRM_DEBUG("mtrr_del = %d\n", retcode);
 			}
-			drm_ioremapfree(map->handle, map->size, dev);
+			iounmap(map->handle);
 			break;
 		case _DRM_SHM:
 			vfree(map->handle);
@@ -238,8 +238,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
 		    (drm_i810_private_t *) dev->dev_private;
 
 		if (dev_priv->ring.virtual_start) {
-			drm_ioremapfree((void *)dev_priv->ring.virtual_start,
-					dev_priv->ring.Size, dev);
+			drm_core_ioremapfree(&dev_priv->ring.map, dev);
 		}
 		if (dev_priv->hw_status_page) {
 			pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -255,9 +254,9 @@ static int i810_dma_cleanup(drm_device_t * dev)
 		for (i = 0; i < dma->buf_count; i++) {
 			drm_buf_t *buf = dma->buflist[i];
 			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
 			if (buf_priv->kernel_virtual && buf->total)
-				drm_ioremapfree(buf_priv->kernel_virtual,
-						buf->total, dev);
+				drm_core_ioremapfree(&buf_priv->map, dev);
 		}
 	}
 	return 0;
@@ -330,8 +329,15 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
 
 		*buf_priv->in_use = I810_BUF_FREE;
 
-		buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
-						       buf->total, dev);
+		buf_priv->map.offset = buf->bus_address;
+		buf_priv->map.size = buf->total;
+		buf_priv->map.type = _DRM_AGP;
+		buf_priv->map.flags = 0;
+		buf_priv->map.mtrr = 0;
+
+		drm_core_ioremap(&buf_priv->map, dev);
+		buf_priv->kernel_virtual = buf_priv->map.handle;
+
 	}
 	return 0;
 }
@@ -382,18 +388,24 @@ static int i810_dma_initialize(drm_device_t * dev,
 	dev_priv->ring.End = init->ring_end;
 	dev_priv->ring.Size = init->ring_size;
 
-	dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
-						   init->ring_start,
-						   init->ring_size, dev);
+	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
+	dev_priv->ring.map.size = init->ring_size;
+	dev_priv->ring.map.type = _DRM_AGP;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
 
-	if (dev_priv->ring.virtual_start == NULL) {
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
 		dev->dev_private = (void *)dev_priv;
 		i810_dma_cleanup(dev);
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
-		return -ENOMEM;
+		return DRM_ERR(ENOMEM);
 	}
 
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
 	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
 
 	dev_priv->w = init->w;
@@ -61,6 +61,7 @@ typedef struct drm_i810_buf_priv {
 	int currently_mapped;
 	void *virtual;
 	void *kernel_virtual;
+	drm_local_map_t map;
 } drm_i810_buf_priv_t;
 
 typedef struct _drm_i810_ring_buffer {
@@ -72,6 +73,7 @@ typedef struct _drm_i810_ring_buffer {
 	int head;
 	int tail;
 	int space;
+	drm_local_map_t map;
 } drm_i810_ring_buffer_t;
 
 typedef struct drm_i810_private {
@@ -226,8 +226,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
 		    (drm_i830_private_t *) dev->dev_private;
 
 		if (dev_priv->ring.virtual_start) {
-			drm_ioremapfree((void *)dev_priv->ring.virtual_start,
-					dev_priv->ring.Size, dev);
+			drm_core_ioremapfree(&dev_priv->ring.map, dev);
 		}
 		if (dev_priv->hw_status_page) {
 			pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -245,8 +244,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
 			drm_buf_t *buf = dma->buflist[i];
 			drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 			if (buf_priv->kernel_virtual && buf->total)
-				drm_ioremapfree(buf_priv->kernel_virtual,
-						buf->total, dev);
+				drm_core_ioremapfree(&buf_priv->map, dev);
 		}
 	}
 	return 0;
@@ -323,8 +321,14 @@ static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv)
 
 		*buf_priv->in_use = I830_BUF_FREE;
 
-		buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
-						       buf->total, dev);
+		buf_priv->map.offset = buf->bus_address;
+		buf_priv->map.size = buf->total;
+		buf_priv->map.type = _DRM_AGP;
+		buf_priv->map.flags = 0;
+		buf_priv->map.mtrr = 0;
+
+		drm_core_ioremap(&buf_priv->map, dev);
+		buf_priv->kernel_virtual = buf_priv->map.handle;
 	}
 	return 0;
 }
@@ -376,18 +380,24 @@ static int i830_dma_initialize(drm_device_t * dev,
 	dev_priv->ring.End = init->ring_end;
 	dev_priv->ring.Size = init->ring_size;
 
-	dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
-						   init->ring_start,
-						   init->ring_size, dev);
+	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
+	dev_priv->ring.map.size = init->ring_size;
+	dev_priv->ring.map.type = _DRM_AGP;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
 
-	if (dev_priv->ring.virtual_start == NULL) {
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
 		dev->dev_private = (void *)dev_priv;
 		i830_dma_cleanup(dev);
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
-		return -ENOMEM;
+		return DRM_ERR(ENOMEM);
 	}
 
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
 	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
 
 	dev_priv->w = init->w;
@@ -68,6 +68,7 @@ typedef struct drm_i830_buf_priv {
 	int currently_mapped;
 	void __user *virtual;
 	void *kernel_virtual;
+	drm_local_map_t map;
 } drm_i830_buf_priv_t;
 
 typedef struct _drm_i830_ring_buffer {
@@ -79,6 +80,7 @@ typedef struct _drm_i830_ring_buffer {
 	int head;
 	int tail;
 	int space;
+	drm_local_map_t map;
 } drm_i830_ring_buffer_t;
 
 typedef struct drm_i830_private {
@@ -9,7 +9,8 @@
  * between the contexts
  */
 #define NV40_GRCTX_SIZE (175*1024)
-#define NV44_GRCTX_SIZE (25*1024)
+#define NV43_GRCTX_SIZE (70*1024)
+#define NV4E_GRCTX_SIZE (25*1024)
 
 /*TODO: deciper what each offset in the context represents. The below
  * contexts are taken from dumps just after the 3D object is
@@ -155,7 +156,123 @@ static void nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
 		INSTANCE_WR(ctx, i/4, 0x3f800000);
 }
 
-static void nv44_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+static void
+nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+{
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+	INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
+	INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
+	INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
+	INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
+	INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
+	INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
+	INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
+	INSTANCE_WR(ctx, 0x00194/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x00198/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x0019c/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x001a0/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x001a4/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x001a8/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x001ac/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x001b0/4, 0x80000000);
+	INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
+	INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
+	INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
+	INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
+	INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
+	INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
+	INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
+	INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
+	INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
+	INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
+	INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
+	INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
+	INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
+	INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
+	INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
+	INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
+	for (i=0x0044c; i<=0x00488; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x07ff0000);
+	INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
+	INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
+	INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
+	INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
+	INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
+	INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
+	INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
+	INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
+	INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
+	INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
+	INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
+	INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
+	INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000);
+	INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00);
+	for (i=0x005dc; i<=0x00618; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00018488);
+	for (i=0x0061c; i<=0x00658; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00028202);
+	for (i=0x0069c; i<=0x006d8; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x0000aae4);
+	for (i=0x006dc; i<=0x00718; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x01012000);
+	for (i=0x0071c; i<=0x00758; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00080008);
+	for (i=0x0079c; i<=0x007d8; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00100008);
+	for (i=0x0082c; i<=0x00838; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x0001bc80);
+	for (i=0x0083c; i<=0x00848; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00000202);
+	for (i=0x0085c; i<=0x00868; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00000008);
+	for (i=0x0087c; i<=0x00888; i+=4)
+		INSTANCE_WR(ctx, i/4, 0x00080008);
+	INSTANCE_WR(ctx, 0x0089c/4, 0x00000002);
+	INSTANCE_WR(ctx, 0x008d0/4, 0x00000021);
+	INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3);
+	INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
+	INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
+	INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
+	INSTANCE_WR(ctx, 0x008f4/4, 0x00020000);
+	INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
+	INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
+	INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
+	INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
+	INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005);
+	INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff);
+	INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555);
+	INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555);
+	INSTANCE_WR(ctx, 0x00abc/4, 0x00005555);
+	INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00af8/4, 0x00000001);
+	for (i=0x02ec0; i<=0x02f38; i+=8)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x04c80; i<=0x06e70; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x00000001);
+	for (i=0x06e80; i<=0x07270; i+=16)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x096c0; i<=0x0b8b0; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x00000001);
+	for (i=0x0b8c0; i<=0x0bcb0; i+=16)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+	for (i=0x0e100; i<=0x102f0; i+=24)
+		INSTANCE_WR(ctx, i/4, 0x00000001);
+	for (i=0x10300; i<=0x106f0; i+=16)
+		INSTANCE_WR(ctx, i/4, 0x3f800000);
+};
+
+static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
 	int i;
@@ -267,13 +384,29 @@ nv40_graph_context_create(drm_device_t *dev, int channel)
 	drm_nouveau_private_t *dev_priv =
 		(drm_nouveau_private_t *)dev->dev_private;
 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+	void (*ctx_init)(drm_device_t *, struct mem_block *);
 	unsigned int ctx_size;
-	int i;
+	int i, chipset;
 
-	if (dev_priv->card_type == NV_40)
+	chipset = (NV_READ(NV_PMC_BOOT_0) & 0x0ff00000) >> 20;
+	switch (chipset) {
+	case 0x40:
 		ctx_size = NV40_GRCTX_SIZE;
-	else
-		ctx_size = NV44_GRCTX_SIZE;
+		ctx_init = nv40_graph_context_init;
+		break;
+	case 0x43:
+		ctx_size = NV43_GRCTX_SIZE;
+		ctx_init = nv43_graph_context_init;
+		break;
+	case 0x4e:
+		ctx_size = NV4E_GRCTX_SIZE;
+		ctx_init = nv4e_graph_context_init;
+		break;
+	default:
+		ctx_size = NV40_GRCTX_SIZE;
+		ctx_init = nv40_graph_context_init;
+		break;
+	}
 
 	/* Alloc and clear RAMIN to store the context */
 	chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
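Note: the chipset number feeding this switch is decoded from the PMC boot register read shown above; as a worked example (the register value here is hypothetical, for illustration only):

	uint32_t boot0 = 0x04300098;			/* hypothetical NV_PMC_BOOT_0 readout */
	int chipset = (boot0 & 0x0ff00000) >> 20;	/* -> 0x43, i.e. an NV43 */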
@@ -283,14 +416,7 @@ nv40_graph_context_create(drm_device_t *dev, int channel)
 		INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
 
 	/* Initialise default context values */
-	if (dev_priv->card_type == NV_40)
-		nv40_graph_context_init(dev, chan->ramin_grctx);
-	else {
-		/*XXX: this context was pulled from a c51 card. no idea if it
-		 *     is usable on a "real" nv44...
-		 */
-		nv44_graph_context_init(dev, chan->ramin_grctx);
-	}
+	ctx_init(dev, chan->ramin_grctx);
 
 	return 0;
 }
@@ -430,7 +556,40 @@ static uint32_t nv40_ctx_voodoo[] = {
 	~0
 };
 
-static uint32_t c51_ctx_voodoo[] = {
+static uint32_t nv43_ctx_voodoo[] = {
+	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
+	0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+	0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
+	0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+	0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+	0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+	0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+	0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+	0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1,
+	0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
+	0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
+	0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
+	0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
+	0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a,
+	0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965,
+	0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
+	0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
+	0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
+	0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350,
+	0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006,
+	0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004,
+	0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884,
+	0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a,
+	0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
+	0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
+	0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
+	0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
+	0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
+	~0
+};
+
+static uint32_t nv4e_ctx_voodoo[] = {
 	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
 	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
 	0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
@@ -477,7 +636,8 @@ nv40_graph_init(drm_device_t *dev)
 	DRM_DEBUG("chipset (from PMC_BOOT_0): NV%02X\n", chipset);
 	switch (chipset) {
 	case 0x40: ctx_voodoo = nv40_ctx_voodoo; break;
-	case 0x4e: ctx_voodoo = c51_ctx_voodoo; break;
+	case 0x43: ctx_voodoo = nv43_ctx_voodoo; break;
+	case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break;
 	default:
 		DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", chipset);
 		ctx_voodoo = NULL;