Merge branch 'origin' into radeon-ttm

Conflicts:

	shared-core/radeon_drv.h
Dave Airlie 2007-06-05 10:09:11 +10:00
commit 07345af838
71 changed files with 1544 additions and 1207 deletions

README

@ -1,4 +1,69 @@
By default, this will install into /usr/local. If you want to install this
libdrm to replace your system copy, say:
./configure --prefix=/usr --exec-prefix=/
DRM README file
There are two main parts to this package: the DRM client library/interface
(libdrm.so) and kernel/hardware-specific device modules (such as i915.ko).
Compiling
---------
By default, libdrm and the DRM header files will install into /usr/local/.
If you want to install this DRM to replace your system copy, say:
./configure --prefix=/usr --exec-prefix=/
Then,
make install
To build the device-specific kernel modules:
cd linux-core/
make
cp *.ko /lib/modules/VERSION/kernel/drivers/char/drm/
(where VERSION is your kernel version: uname -r)
Or,
cd bsd-core/
make
copy the kernel modules to the appropriate place
Tips & Trouble-shooting
-----------------------
1. You'll need kernel sources. If using Fedora Core 5, for example, you may
need to install RPMs such as:
kernel-smp-devel-2.6.15-1.2054_FC5.i686.rpm
kernel-devel-2.6.15-1.2054_FC5.i686.rpm
etc.
2. You may need to make a symlink from /lib/modules/VERSION/build to your
kernel sources in /usr/src/kernels/VERSION (where VERSION is `uname -r`):
cd /lib/modules/VERSION
ln -s /usr/src/kernels/VERSION build
3. If you've built the kernel modules but they won't load because of an
error like this:
$ /sbin/modprobe drm
FATAL: Error inserting drm (/lib/modules/2.6.15-1.2054_FC5smp/kernel/drivers/char/drm/drm.ko): Invalid module format
And 'dmesg|tail' says:
drm: disagrees about version of symbol struct_module
Try recompiling your drm modules without the Module.symvers file.
That is, rm the /usr/src/kernels/2.6.15-1.2054_FC5-smp-i686/Module.symvers
file (or rename it). Then do a 'make clean' before rebuilding your drm
modules.


@ -69,4 +69,3 @@ drm_pciids.h: ${SHARED}/drm_pciids.txt
${SHAREDFILES}:
ln -sf ${SHARED}/$@ $@


@ -32,8 +32,6 @@
#include "drmP.h"
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
#define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */
#define ATI_PCIGART_TABLE_SIZE 32768
int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
@ -48,7 +46,7 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
/* GART table in system memory */
dev->sg->dmah = drm_pci_alloc(dev, ATI_PCIGART_TABLE_SIZE, 0,
dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size, 0,
0xfffffffful);
if (dev->sg->dmah == NULL) {
DRM_ERROR("cannot allocate PCI GART table!\n");
@ -63,9 +61,9 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
pci_gart = gart_info->addr;
}
pages = DRM_MIN(dev->sg->pages, ATI_MAX_PCIGART_PAGES);
pages = DRM_MIN(dev->sg->pages, gart_info->table_size / sizeof(u32));
bzero(pci_gart, ATI_PCIGART_TABLE_SIZE);
bzero(pci_gart, gart_info->table_size);
KASSERT(PAGE_SIZE >= ATI_PCIGART_PAGE_SIZE, ("page size too small"));
@ -73,10 +71,17 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
page_base = (u32) dev->sg->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
if (gart_info->is_pcie)
*pci_gart = (cpu_to_le32(page_base) >> 8) | 0xc;
else
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
*pci_gart = cpu_to_le32(page_base | 0xc);
break;
case DRM_ATI_GART_PCIE:
*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
break;
default:
*pci_gart = cpu_to_le32(page_base);
break;
}
pci_gart++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
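
The old is_pcie test above becomes a three-way switch on gart_reg_if. As a minimal sketch, the entry encoding it implements could be factored into a helper like the following (the helper itself is hypothetical and not part of this change):

static u32 ati_pcigart_entry(u32 page_base, int gart_reg_if)
{
        switch (gart_reg_if) {
        case DRM_ATI_GART_IGP:
                return cpu_to_le32(page_base | 0xc);        /* bus address with the 0xc control bits */
        case DRM_ATI_GART_PCIE:
                return cpu_to_le32((page_base >> 8) | 0xc); /* PCIE entries store the address >> 8 */
        default:                                            /* DRM_ATI_GART_PCI */
                return cpu_to_le32(page_base);              /* plain bus address */
        }
}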


@ -47,6 +47,9 @@ typedef struct drm_file drm_file_t;
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/stat.h>
#if __FreeBSD_version >= 700000
#include <sys/priv.h>
#endif
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
@ -230,7 +233,11 @@ enum {
#define PAGE_ALIGN(addr) round_page(addr)
/* DRM_SUSER returns true if the user is superuser */
#if __FreeBSD_version >= 700000
#define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0)
#else
#define DRM_SUSER(p) (suser(p) == 0)
#endif
#define DRM_AGP_FIND_DEVICE() agp_find_device()
#define DRM_MTRR_WC MDF_WRITECOMBINE
#define jiffies ticks
@ -394,19 +401,6 @@ do { \
} \
} while (0)
#define DRM_GETSAREA() \
do { \
drm_local_map_t *map; \
DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
TAILQ_FOREACH(map, &dev->maplist, link) { \
if (map->type == _DRM_SHM && \
map->flags & _DRM_CONTAINS_LOCK) { \
dev_priv->sarea = map; \
break; \
} \
} \
} while (0)
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
@ -627,12 +621,17 @@ typedef struct drm_vbl_sig {
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
#define DRM_ATI_GART_PCI 1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
typedef struct ati_pcigart_info {
int gart_table_location;
int is_pcie;
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
int table_size;
} drm_ati_pcigart_info;
struct drm_driver_info {
@ -822,6 +821,7 @@ dev_type_read(drm_read);
dev_type_poll(drm_poll);
dev_type_mmap(drm_mmap);
#endif
extern drm_local_map_t *drm_getsarea(drm_device_t *dev);
/* File operations helpers (drm_fops.c) */
#ifdef __FreeBSD__


@ -43,7 +43,7 @@ static int
drm_device_find_capability(drm_device_t *dev, int cap)
{
#ifdef __FreeBSD__
#if __FreeBSD_version >= 700010
#if __FreeBSD_version >= 602102
return (pci_find_extcap(dev->device, cap, NULL) == 0);
#else


@ -912,6 +912,18 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
return DRM_ERR(retcode);
}
drm_local_map_t *drm_getsarea(drm_device_t *dev)
{
drm_local_map_t *map;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
return map;
}
return NULL;
}
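
Driver code that previously expanded DRM_GETSAREA() to locate its SAREA map is expected to call the new helper instead. Roughly, with dev->dev_lock held just as the old macro required (dev_priv and its sarea field stand in for a driver's private state):

        dev_priv->sarea = drm_getsarea(dev);
        if (dev_priv->sarea == NULL) {
                DRM_ERROR("could not find sarea!\n");
                return DRM_ERR(EINVAL);
        }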
#if DRM_LINUX

File diff suppressed because it is too large


@ -31,8 +31,6 @@
*
*/
/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/xf86drm.h,v 1.26 2003/08/16 19:26:37 dawes Exp $ */
#ifndef _XF86DRM_H_
#define _XF86DRM_H_


@ -25,8 +25,6 @@
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmHash.c,v 1.4 2001/03/21 18:08:54 dawes Exp $
*
* DESCRIPTION
*
* This file contains a straightforward implementation of a fixed-sized
@ -70,25 +68,14 @@
*
*/
#ifdef HAVE_XORG_CONFIG_H
#include <xorg-config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#define HASH_MAIN 0
#if HASH_MAIN
# include <stdio.h>
# include <stdlib.h>
#else
#if !HASH_MAIN
# include "drm.h"
# include "xf86drm.h"
# ifdef XFree86LOADER
# include "xf86.h"
# include "xf86_ansic.h"
# else
# include <stdio.h>
# include <stdlib.h>
# endif
#endif
#define HASH_MAGIC 0xdeadbeef


@ -25,8 +25,6 @@
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmRandom.c,v 1.4 2000/06/17 00:03:34 martin Exp $
*
* DESCRIPTION
*
* This file contains a simple, straightforward implementation of the Park
@ -73,25 +71,14 @@
*
*/
#ifdef HAVE_XORG_CONFIG_H
#include <xorg-config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#define RANDOM_MAIN 0
#if RANDOM_MAIN
# include <stdio.h>
# include <stdlib.h>
#else
#if !RANDOM_MAIN
# include "drm.h"
# include "xf86drm.h"
# ifdef XFree86LOADER
# include "xf86.h"
# include "xf86_ansic.h"
# else
# include <stdio.h>
# include <stdlib.h>
# endif
#endif
#define RANDOM_MAGIC 0xfeedbeef


@ -25,8 +25,6 @@
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmSL.c,v 1.3 2000/06/17 00:03:34 martin Exp $
*
* DESCRIPTION
*
* This file contains a straightforward skip list implementation.n
@ -40,26 +38,16 @@
*
*/
#ifdef HAVE_XORG_CONFIG_H
#include <xorg-config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#define SL_MAIN 0
#if SL_MAIN
# include <stdio.h>
# include <stdlib.h>
# include <sys/time.h>
#else
#if !SL_MAIN
# include "drm.h"
# include "xf86drm.h"
# ifdef XFree86LOADER
# include "xf86.h"
# include "xf86_ansic.h"
# else
# include <stdio.h>
# include <stdlib.h>
# endif
#else
# include <sys/time.h>
#endif
#define SL_LIST_MAGIC 0xfacade00LU


@ -60,7 +60,7 @@ typedef struct _drmMMListHead
(__item)->next = (__item); \
} while (0)
#define DRMLISTADD(__item, __list) \
#define DRMLISTADD(__item, __list) \
do { \
(__item)->prev = (__list); \
(__item)->next = (__list)->next; \
@ -93,16 +93,18 @@ typedef struct _drmMMListHead
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
typedef struct _drmFence{
unsigned handle;
int class;
unsigned type;
unsigned flags;
unsigned signaled;
unsigned pad[4]; /* for future expansion */
typedef struct _drmFence
{
unsigned handle;
int class;
unsigned type;
unsigned flags;
unsigned signaled;
unsigned pad[4]; /* for future expansion */
} drmFence;
typedef struct _drmBO{
typedef struct _drmBO
{
drm_bo_type_t type;
unsigned handle;
drm_u64_t mapHandle;
@ -121,8 +123,8 @@ typedef struct _drmBO{
unsigned pad[8]; /* for future expansion */
} drmBO;
typedef struct _drmBONode {
typedef struct _drmBONode
{
drmMMListHead head;
drmBO *buf;
drm_bo_arg_t bo_arg;
@ -138,22 +140,24 @@ typedef struct _drmBOList {
drmMMListHead free;
} drmBOList;
/* Fencing */
extern int drmFenceCreate(int fd, unsigned flags, int class,
unsigned type,
drmFence *fence);
extern int drmFenceDestroy(int fd, const drmFence *fence);
extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
extern int drmFenceUnreference(int fd, const drmFence *fence);
extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
extern int drmFenceSignaled(int fd, drmFence *fence,
unsigned fenceType, int *signaled);
extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
unsigned flush_type);
extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
unsigned emit_type);
extern int drmFenceBuffers(int fd, unsigned flags, drmFence *fence);
/*
* Fence functions.
*/
extern int drmFenceCreate(int fd, unsigned flags, int class,
unsigned type, drmFence *fence);
extern int drmFenceDestroy(int fd, const drmFence *fence);
extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
extern int drmFenceUnreference(int fd, const drmFence *fence);
extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
extern int drmFenceSignaled(int fd, drmFence *fence,
unsigned fenceType, int *signaled);
extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
unsigned flush_type);
extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
unsigned emit_type);
extern int drmFenceBuffers(int fd, unsigned flags, drmFence *fence);
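
For orientation, a rough userspace sequence built only from the declarations above; the zero flags/class arguments and the use of DRM_FENCE_TYPE_EXE are placeholders, not values any particular driver requires, and fd is an already-open DRM file descriptor:

        drmFence fence;
        int signaled = 0;

        if (drmFenceCreate(fd, 0, 0, DRM_FENCE_TYPE_EXE, &fence))
                return -1;
        drmFenceEmit(fd, 0, &fence, DRM_FENCE_TYPE_EXE);   /* ask the driver to emit it */
        drmFenceWait(fd, 0, &fence, DRM_FENCE_TYPE_EXE);   /* block until it signals */
        drmFenceSignaled(fd, &fence, DRM_FENCE_TYPE_EXE, &signaled);
        drmFenceDestroy(fd, &fence);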
/*


@ -237,6 +237,9 @@ dristat: dristat.c
drmstat: drmstat.c
$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
install:
make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
else
# Check for kernel versions that we don't support.
@ -280,6 +283,7 @@ CONFIG_DRM_SAVAGE := n
CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
# Enable module builds for the modules requested/supported.


@ -151,7 +151,8 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
order = drm_order((gart_info->table_size +
(PAGE_SIZE-1)) / PAGE_SIZE);
num_pages = 1 << order;
address = drm_ati_alloc_pcigart_table(order);
if (!address) {
@ -169,7 +170,8 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
PCI_DMA_TODEVICE);
if (bus_address == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
order = drm_order((gart_info->table_size +
(PAGE_SIZE-1)) / PAGE_SIZE);
drm_ati_free_pcigart_table(address, order);
address = NULL;
goto done;
@ -205,10 +207,18 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
if (gart_info->is_pcie)
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
*pci_gart = cpu_to_le32((page_base) | 0xc);
break;
case DRM_ATI_GART_PCIE:
*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
else
break;
default:
case DRM_ATI_GART_PCI:
*pci_gart = cpu_to_le32(page_base);
break;
}
pci_gart++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
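
On the driver side, the new gart_reg_if and table_size fields mean a caller now describes its GART up front before calling drm_ati_pcigart_init(). A rough sketch; dev_priv, the 32 KB size (which matches the old hard-coded ATI_PCIGART_TABLE_SIZE), and the nonzero-on-success check are illustrative assumptions:

        drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;

        gart_info->gart_table_location = DRM_ATI_GART_MAIN;
        gart_info->gart_reg_if = DRM_ATI_GART_PCIE;   /* was: is_pcie = 1 */
        gart_info->table_size = 32768;                /* 32 KB = 8192 u32 entries */
        gart_info->addr = NULL;
        gart_info->bus_addr = 0;
        if (!drm_ati_pcigart_init(dev, gart_info))    /* assumes nonzero == success */
                DRM_ERROR("failed to init PCI GART!\n");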


@ -76,6 +76,7 @@
#include <asm/pgalloc.h>
#include "drm.h"
#include <linux/slab.h>
#include <linux/idr.h>
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@ -300,19 +301,14 @@ typedef struct drm_devstate {
} drm_devstate_t;
typedef struct drm_magic_entry {
drm_hash_item_t hash_item;
struct list_head head;
drm_hash_item_t hash_item;
struct drm_file *priv;
} drm_magic_entry_t;
typedef struct drm_magic_head {
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
struct list_head head;
struct vm_area_struct *vma;
struct drm_vma_entry *next;
pid_t pid;
} drm_vma_entry_t;
@ -411,8 +407,7 @@ typedef struct drm_file {
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct drm_file *next;
struct drm_file *prev;
struct list_head lhead;
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
@ -493,8 +488,7 @@ typedef struct drm_agp_mem {
DRM_AGP_MEM *memory;
unsigned long bound; /**< address */
int pages;
struct drm_agp_mem *prev; /**< previous entry */
struct drm_agp_mem *next; /**< next entry */
struct list_head head;
} drm_agp_mem_t;
/**
@ -504,7 +498,7 @@ typedef struct drm_agp_mem {
*/
typedef struct drm_agp_head {
DRM_AGP_KERN agp_info; /**< AGP device information */
drm_agp_mem_t *memory; /**< memory entries */
struct list_head memory;
unsigned long mode; /**< AGP mode */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
struct agp_bridge_data *bridge;
@ -576,6 +570,12 @@ typedef struct drm_ctx_list {
drm_file_t *tag; /**< associated fd private data */
} drm_ctx_list_t;
struct drm_ctx_sarea_list {
struct list_head head;
int ctx_id;
drm_map_t *map;
};
typedef struct drm_vbl_sig {
struct list_head head;
unsigned int sequence;
@ -587,15 +587,24 @@ typedef struct drm_vbl_sig {
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
#define DRM_ATI_GART_PCI 1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
typedef struct ati_pcigart_info {
int gart_table_location;
int is_pcie;
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
int table_size;
} drm_ati_pcigart_info;
struct drm_drawable_list {
struct list_head head;
int id;
drm_drawable_info_t info;
};
#include "drm_objects.h"
@ -722,15 +731,14 @@ typedef struct drm_device {
/** \name Authentication */
/*@{ */
drm_file_t *file_first; /**< file list head */
drm_file_t *file_last; /**< file list tail */
struct list_head filelist;
drm_open_hash_t magiclist;
struct list_head magicfree;
/*@} */
/** \name Memory management */
/*@{ */
drm_map_list_t *maplist; /**< Linked list of regions */
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
drm_open_hash_t map_hash; /**< User token hash table for maps */
drm_mm_t offset_manager; /**< User token manager */
@ -740,14 +748,14 @@ typedef struct drm_device {
/** \name Context handle management */
/*@{ */
drm_ctx_list_t *ctxlist; /**< Linked list of context handles */
struct list_head ctxlist; /**< Linked list of context handles */
int ctx_count; /**< Number of context handles */
struct mutex ctxlist_mutex; /**< For ctxlist */
drm_map_t **context_sareas; /**< per-context SAREA's */
int max_context;
struct idr ctx_idr;
struct list_head context_sarealist;
drm_vma_entry_t *vmalist; /**< List of vmas (for debugging) */
struct list_head vmalist; /**< List of vmas (for debugging) */
drm_lock_data_t lock; /**< Information on hardware lock */
/*@} */
@ -783,8 +791,8 @@ typedef struct drm_device {
atomic_t vbl_received;
atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs; /**< signal list to send on VBLANK */
drm_vbl_sig_t vbl_sigs2; /**< signals to send on secondary VBLANK */
struct list_head vbl_sigs; /**< signal list to send on VBLANK */
struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
unsigned int vbl_pending;
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
@ -806,7 +814,6 @@ typedef struct drm_device {
struct pci_controller *hose;
#endif
drm_sg_mem_t *sg; /**< Scatter gather memory */
unsigned long *ctx_bitmap; /**< context bitmap */
void *dev_private; /**< device private data */
drm_sigdata_t sigdata; /**< For block_all_signals */
sigset_t sigmask;
@ -822,22 +829,18 @@ typedef struct drm_device {
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
unsigned int drw_bitfield_length;
u32 *drw_bitfield;
unsigned int drw_info_length;
drm_drawable_info_t **drw_info;
struct idr drw_idr;
struct list_head drwlist;
/*@} */
} drm_device_t;
#if __OS_HAS_AGP
typedef struct drm_agp_ttm_priv {
typedef struct drm_agp_ttm_backend {
drm_ttm_backend_t backend;
DRM_AGP_MEM *mem;
struct agp_bridge_data *bridge;
unsigned alloc_type;
unsigned cached_type;
unsigned uncached_type;
int populated;
} drm_agp_ttm_priv;
} drm_agp_ttm_backend_t;
#endif
#define ATI_PCIGART_FLAG_VMALLOC 1
@ -1125,8 +1128,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend);
extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
@ -1138,6 +1140,8 @@ extern drm_head_t **drm_heads;
extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
/* Proc support (drm_proc.h) */
extern int drm_proc_init(drm_device_t * dev,
int minor,
@ -1200,7 +1204,7 @@ static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
{
drm_map_list_t *_entry;
list_for_each_entry(_entry, &dev->maplist->head, head)
list_for_each_entry(_entry, &dev->maplist, head)
if (_entry->user_token == token)
return _entry->map;
return NULL;
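
Taken together, the header changes swap the hand-rolled next/prev chains and realloc'd arrays for two stock kernel facilities, so call sites end up with patterns like these (ctx_id is illustrative):

        drm_file_t *priv;
        struct drm_ctx_sarea_list *ctx_sarea;
        drm_map_t *map = NULL;

        /* embedded struct list_head: walk every open file of the device */
        list_for_each_entry(priv, &dev->filelist, lhead)
                priv->authenticated = 0;

        /* idr: map a small integer handle back to its object */
        ctx_sarea = idr_find(&dev->ctx_idr, ctx_id);
        if (ctx_sarea)
                map = ctx_sarea->map;   /* the per-context SAREA, if one was bound */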


@ -249,11 +249,7 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
entry->prev = NULL;
entry->next = dev->agp->memory;
if (dev->agp->memory)
dev->agp->memory->prev = entry;
dev->agp->memory = entry;
list_add(&entry->head, &dev->agp->memory);
request->handle = entry->handle;
request->physical = memory->physical;
@ -280,10 +276,12 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
return err;
if (copy_to_user(argp, &request, sizeof(request))) {
drm_agp_mem_t *entry = dev->agp->memory;
dev->agp->memory = entry->next;
dev->agp->memory->prev = NULL;
drm_agp_mem_t *entry;
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == request.handle)
break;
}
list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return -EFAULT;
@ -306,7 +304,7 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
{
drm_agp_mem_t *entry;
for (entry = dev->agp->memory; entry; entry = entry->next) {
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
@ -435,13 +433,7 @@ int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
if (entry->bound)
drm_unbind_agp(entry->memory);
if (entry->prev)
entry->prev->next = entry->next;
else
dev->agp->memory = entry->next;
if (entry->next)
entry->next->prev = entry->prev;
list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
@ -502,7 +494,7 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
return NULL;
}
head->memory = NULL;
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
return head;
@ -570,7 +562,8 @@ static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
struct page **pages) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
drm_agp_ttm_backend_t *agp_be =
container_of(backend, drm_agp_ttm_backend_t, backend);
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
@ -579,9 +572,9 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
DRM_DEBUG("drm_agp_populate_ttm\n");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
#else
mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
#endif
if (!mem) {
drm_free_memctl(num_pages *sizeof(void *));
@ -593,7 +586,7 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
for (cur_page = pages; cur_page < last_page; ++cur_page) {
mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
}
agp_priv->mem = mem;
agp_be->mem = mem;
return 0;
}
@ -601,76 +594,82 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
unsigned long offset,
int cached)
{
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_AGP_MEM *mem = agp_priv->mem;
drm_agp_ttm_backend_t *agp_be =
container_of(backend, drm_agp_ttm_backend_t, backend);
DRM_AGP_MEM *mem = agp_be->mem;
int ret;
DRM_DEBUG("drm_agp_bind_ttm\n");
DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
DRM_BE_FLAG_BOUND_CACHED);
mem->is_flushed = TRUE;
mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY :
AGP_USER_MEMORY;
ret = drm_agp_bind_memory(mem, offset);
if (ret) {
DRM_ERROR("AGP Bind memory failed\n");
}
DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
DRM_BE_FLAG_BOUND_CACHED);
return ret;
}
static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
drm_agp_ttm_backend_t *agp_be =
container_of(backend, drm_agp_ttm_backend_t, backend);
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_priv->mem->is_bound)
return drm_agp_unbind_memory(agp_priv->mem);
if (agp_be->mem->is_bound)
return drm_agp_unbind_memory(agp_be->mem);
else
return 0;
}
static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_AGP_MEM *mem = agp_priv->mem;
drm_agp_ttm_backend_t *agp_be =
container_of(backend, drm_agp_ttm_backend_t, backend);
DRM_AGP_MEM *mem = agp_be->mem;
DRM_DEBUG("drm_agp_clear_ttm\n");
if (mem) {
unsigned long num_pages = mem->page_count;
backend->unbind(backend);
backend->func->unbind(backend);
agp_free_memory(mem);
drm_free_memctl(num_pages *sizeof(void *));
}
agp_priv->mem = NULL;
agp_be->mem = NULL;
}
static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv;
drm_agp_ttm_backend_t *agp_be;
if (backend) {
DRM_DEBUG("drm_agp_destroy_ttm\n");
agp_priv = (drm_agp_ttm_priv *) backend->private;
if (agp_priv) {
if (agp_priv->mem) {
backend->clear(backend);
agp_be = container_of(backend, drm_agp_ttm_backend_t, backend);
if (agp_be) {
if (agp_be->mem) {
backend->func->clear(backend);
}
drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
backend->private = NULL;
}
if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);
}
}
}
static drm_ttm_backend_func_t agp_ttm_backend =
{
.needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
.populate = drm_agp_populate,
.clear = drm_agp_clear_ttm,
.bind = drm_agp_bind_ttm,
.unbind = drm_agp_unbind_ttm,
.destroy = drm_agp_destroy_ttm,
};
drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend)
drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev)
{
drm_ttm_backend_t *agp_be;
drm_agp_ttm_priv *agp_priv;
drm_agp_ttm_backend_t *agp_be;
struct agp_kern_info *info;
if (!dev->agp) {
@ -690,37 +689,19 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
return NULL;
}
agp_be = (backend != NULL) ? backend:
drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
if (!agp_be)
return NULL;
agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
agp_be->mem = NULL;
if (!agp_priv) {
drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
return NULL;
}
agp_be->bridge = dev->agp->bridge;
agp_be->populated = FALSE;
agp_be->backend.func = &agp_ttm_backend;
agp_be->backend.mem_type = DRM_BO_MEM_TT;
agp_priv->mem = NULL;
agp_priv->alloc_type = AGP_USER_MEMORY;
agp_priv->cached_type = AGP_USER_CACHED_MEMORY;
agp_priv->uncached_type = AGP_USER_MEMORY;
agp_priv->bridge = dev->agp->bridge;
agp_priv->populated = FALSE;
agp_be->private = (void *) agp_priv;
agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
agp_be->populate = drm_agp_populate;
agp_be->clear = drm_agp_clear_ttm;
agp_be->bind = drm_agp_bind_ttm;
agp_be->unbind = drm_agp_unbind_ttm;
agp_be->destroy = drm_agp_destroy_ttm;
DRM_FLAG_MASKED(agp_be->flags, (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0,
DRM_BE_FLAG_NEEDS_FREE);
agp_be->drm_map_type = _DRM_AGP;
return agp_be;
return &agp_be->backend;
}
EXPORT_SYMBOL(drm_agp_init_ttm);

View File

@ -75,7 +75,8 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo)
{
drm_mem_type_manager_t *man;
if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
|| bo->mem.mem_type != bo->pinned_mem_type) {
man = &bo->dev->bm.man[bo->mem.mem_type];
list_add_tail(&bo->lru, &man->lru);
} else {
@ -88,6 +89,9 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
#ifdef DRM_ODD_MM_COMPAT
int ret;
if (!bo->map_list.map)
return 0;
ret = drm_bo_lock_kmm(bo);
if (ret)
return ret;
@ -95,6 +99,9 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
if (old_is_pci)
drm_bo_finish_unmap(bo);
#else
if (!bo->map_list.map)
return 0;
drm_bo_unmap_virtual(bo);
#endif
return 0;
@ -105,6 +112,9 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
#ifdef DRM_ODD_MM_COMPAT
int ret;
if (!bo->map_list.map)
return;
ret = drm_bo_remap_bound(bo);
if (ret) {
DRM_ERROR("Failed to remap a bound buffer object.\n"
@ -130,6 +140,11 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo)
if (!bo->ttm)
ret = -ENOMEM;
break;
case drm_bo_type_kernel:
bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
if (!bo->ttm)
ret = -ENOMEM;
break;
case drm_bo_type_user:
case drm_bo_type_fake:
break;
@ -1023,30 +1038,23 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
int eagain_if_wait)
{
int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
unsigned long _end = jiffies + 3 * DRM_HZ;
if (ret && no_wait)
return -EBUSY;
else if (!ret)
return 0;
do {
mutex_unlock(&bo->mutex);
DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
!drm_bo_check_unfenced(bo));
mutex_lock(&bo->mutex);
if (ret == -EINTR)
return -EAGAIN;
if (ret) {
DRM_ERROR
("Error waiting for buffer to become fenced\n");
return ret;
}
ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
} while (ret && !time_after_eq(jiffies, _end));
ret = 0;
mutex_unlock(&bo->mutex);
DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
!drm_bo_check_unfenced(bo));
mutex_lock(&bo->mutex);
if (ret == -EINTR)
return -EAGAIN;
ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (ret) {
DRM_ERROR("Timeout waiting for buffer to become fenced\n");
return ret;
return -EBUSY;
}
if (eagain_if_wait)
return -EAGAIN;
@ -1405,7 +1413,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
} else if (bo->pinned_node != NULL) {
mutex_lock(&dev->struct_mutex);
drm_mm_put_block(bo->pinned_node);
if (bo->pinned_node != bo->mem.mm_node)
drm_mm_put_block(bo->pinned_node);
list_del_init(&bo->pinned_lru);
bo->pinned_node = NULL;
mutex_unlock(&dev->struct_mutex);
@ -1529,7 +1540,7 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
return ret;
}
int drm_buffer_object_create(drm_file_t * priv,
int drm_buffer_object_create(drm_device_t *dev,
unsigned long size,
drm_bo_type_t type,
uint32_t mask,
@ -1538,7 +1549,6 @@ int drm_buffer_object_create(drm_file_t * priv,
unsigned long buffer_start,
drm_buffer_object_t ** buf_obj)
{
drm_device_t *dev = priv->head->dev;
drm_buffer_manager_t *bm = &dev->bm;
drm_buffer_object_t *bo;
int ret = 0;
@ -1668,8 +1678,12 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
rep.ret = 0;
switch (req->op) {
case drm_bo_create:
rep.ret = drm_bo_lock_test(dev, filp);
if (rep.ret)
break;
rep.ret =
drm_buffer_object_create(priv, req->size,
drm_buffer_object_create(priv->head->dev,
req->size,
req->type,
req->mask,
req->hint,
@ -1718,16 +1732,8 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
drm_buffer_type, &uo);
if (rep.ret)
break;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, req->handle);
entry =
drm_user_object_entry(uo, drm_buffer_object_t,
base);
atomic_dec(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
drm_bo_fill_rep_arg(entry, &rep);
mutex_unlock(&entry->mutex);
rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
break;
case drm_bo_unreference:
rep.ret = drm_user_object_unref(priv, req->handle,
@ -1991,10 +1997,16 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
drm_mem_type_manager_t *man = &bm->man[mem_type];
if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type);
DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
return -EINVAL;
}
if (!man->has_type) {
DRM_ERROR("Memory type %u has not been initialized.\n",
mem_type);
return 0;
}
drm_bo_clean_unfenced(dev);
ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
if (ret)
@ -2294,6 +2306,9 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
if (!dev->dev_mapping)
return;
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}


@ -51,10 +51,8 @@ EXPORT_SYMBOL(drm_get_resource_len);
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
drm_local_map_t *map)
{
struct list_head *list;
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
drm_map_list_t *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
@ -237,14 +235,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
* skipped and we double check that dev->agp->memory is
* actually set as well as being invalid before EPERM'ing
*/
for (entry = dev->agp->memory; entry; entry = entry->next) {
list_for_each_entry(entry, &dev->agp->memory, head) {
if ((map->offset >= entry->bound) &&
(map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (dev->agp->memory && !valid) {
if (!list_empty(&dev->agp->memory) && !valid) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EPERM;
}
@ -288,7 +286,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
list->map = map;
mutex_lock(&dev->struct_mutex);
list_add(&list->head, &dev->maplist->head);
list_add(&list->head, &dev->maplist);
/* Assign a 32-bit handle */
@ -380,29 +378,28 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
*/
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
struct list_head *list;
drm_map_list_t *r_list = NULL;
drm_map_list_t *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
int found = 0;
/* Find the list entry for the map and remove it */
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
list_del(list);
list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
found = 1;
break;
}
}
if (!found) {
return -EINVAL;
}
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
if (list == (&dev->maplist->head)) {
return -EINVAL;
}
switch (map->type) {
case _DRM_REGISTERS:
@ -464,7 +461,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->head->dev;
drm_map_t request;
drm_local_map_t *map = NULL;
struct list_head *list;
drm_map_list_t *r_list;
int ret;
if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
@ -472,9 +469,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->user_token == (unsigned long)request.handle &&
r_list->map->flags & _DRM_REMOVABLE) {
@ -486,7 +481,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
/* List has wrapped around to the head pointer, or its empty we didn't
* find anything.
*/
if (list == (&dev->maplist->head)) {
if (list_empty(&dev->maplist) || !map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@ -610,14 +605,14 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
list_for_each_entry(agp_entry, &dev->agp->memory, head) {
if ((agp_offset >= agp_entry->bound) &&
(agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (dev->agp->memory && !valid) {
if (!list_empty(&dev->agp->memory) && !valid) {
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}
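
Note that the loops which delete entries while walking (drm_rmmap_locked() above, and drm_lastclose() later in this commit) use list_for_each_entry_safe(), which caches the next element before the body runs so that list_del() plus drm_free() on the current entry is safe. In outline:

        drm_map_list_t *r_list, *list_tmp;

        list_for_each_entry_safe(r_list, list_tmp, &dev->maplist, head) {
                list_del(&r_list->head);           /* current entry may now be freed */
                drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
        }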


@ -275,12 +275,14 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
goto out_unlock;
}
pfn = page_to_pfn(page);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
drm_io_prot(_DRM_TTM, vma);
}
err = vm_insert_pfn(vma, address, pfn);
if (!err || err == -EBUSY)
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;


@ -58,20 +58,17 @@
*/
void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
{
if (ctx_handle < 0)
goto failed;
if (!dev->ctx_bitmap)
goto failed;
struct drm_ctx_sarea_list *ctx_sarea;
if (ctx_handle < DRM_MAX_CTXBITMAP) {
mutex_lock(&dev->struct_mutex);
clear_bit(ctx_handle, dev->ctx_bitmap);
dev->context_sareas[ctx_handle] = NULL;
mutex_unlock(&dev->struct_mutex);
return;
}
failed:
DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
mutex_lock(&dev->struct_mutex);
ctx_sarea = idr_find(&dev->ctx_idr, ctx_handle);
if (ctx_sarea) {
idr_remove(&dev->ctx_idr, ctx_handle);
list_del(&ctx_sarea->head);
drm_free(ctx_sarea, sizeof(struct drm_ctx_sarea_list), DRM_MEM_BUFS);
} else
DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
mutex_unlock(&dev->struct_mutex);
return;
}
@ -87,56 +84,32 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
*/
static int drm_ctxbitmap_next(drm_device_t * dev)
{
int bit;
int new_id;
int ret;
struct drm_ctx_sarea_list *new_ctx;
if (!dev->ctx_bitmap)
new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_BUFS);
if (!new_ctx)
return -1;
mutex_lock(&dev->struct_mutex);
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit < DRM_MAX_CTXBITMAP) {
set_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
if ((bit + 1) > dev->max_context) {
dev->max_context = (bit + 1);
if (dev->context_sareas) {
drm_map_t **ctx_sareas;
ctx_sareas = drm_realloc(dev->context_sareas,
(dev->max_context -
1) *
sizeof(*dev->
context_sareas),
dev->max_context *
sizeof(*dev->
context_sareas),
DRM_MEM_MAPS);
if (!ctx_sareas) {
clear_bit(bit, dev->ctx_bitmap);
mutex_unlock(&dev->struct_mutex);
return -1;
}
dev->context_sareas = ctx_sareas;
dev->context_sareas[bit] = NULL;
} else {
/* max_context == 1 at this point */
dev->context_sareas =
drm_alloc(dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if (!dev->context_sareas) {
clear_bit(bit, dev->ctx_bitmap);
mutex_unlock(&dev->struct_mutex);
return -1;
}
dev->context_sareas[bit] = NULL;
}
}
mutex_unlock(&dev->struct_mutex);
return bit;
again:
if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_BUFS);
return -ENOMEM;
}
mutex_lock(&dev->struct_mutex);
ret = idr_get_new_above(&dev->ctx_idr, new_ctx, DRM_RESERVED_CONTEXTS, &new_id);
if (ret == -EAGAIN) {
mutex_unlock(&dev->struct_mutex);
goto again;
}
mutex_unlock(&dev->struct_mutex);
return -1;
new_ctx->ctx_id = new_id;
list_add(&new_ctx->head, &dev->context_sarealist);
return new_id;
}
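
Both this function and the drawable code below rely on the two-step idr API of this kernel generation: idr_pre_get() preloads a free node, and idr_get_new_above() can still return -EAGAIN if that node was consumed by a concurrent allocation, so callers simply loop. Written as a stand-alone helper (the helper name is hypothetical), the idiom is:

static int drm_idr_get_new(struct idr *idr, void *ptr, int start, int *id)
{
        int ret;

again:
        if (idr_pre_get(idr, GFP_KERNEL) == 0)
                return -ENOMEM;                 /* could not preload a free node */
        ret = idr_get_new_above(idr, ptr, start, id);
        if (ret == -EAGAIN)
                goto again;                     /* preloaded node taken by someone else; retry */
        return ret;
}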
/**
@ -149,25 +122,8 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
*/
int drm_ctxbitmap_init(drm_device_t * dev)
{
int i;
int temp;
mutex_lock(&dev->struct_mutex);
dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
DRM_MEM_CTXBITMAP);
if (dev->ctx_bitmap == NULL) {
mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
}
memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
dev->context_sareas = NULL;
dev->max_context = -1;
mutex_unlock(&dev->struct_mutex);
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
}
idr_init(&dev->ctx_idr);
INIT_LIST_HEAD(&dev->context_sarealist);
return 0;
}
@ -182,12 +138,13 @@ int drm_ctxbitmap_init(drm_device_t * dev)
*/
void drm_ctxbitmap_cleanup(drm_device_t * dev)
{
struct drm_ctx_sarea_list *ctx_entry, *ctx_temp;
mutex_lock(&dev->struct_mutex);
if (dev->context_sareas)
drm_free(dev->context_sareas,
sizeof(*dev->context_sareas) *
dev->max_context, DRM_MEM_MAPS);
drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
list_for_each_entry_safe(ctx_entry, ctx_temp, &dev->context_sarealist, head) {
idr_remove(&dev->ctx_idr, ctx_entry->ctx_id);
drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_MAPS);
}
mutex_unlock(&dev->struct_mutex);
}
@ -218,22 +175,24 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
drm_ctx_priv_map_t request;
drm_map_t *map;
drm_map_list_t *_entry;
struct drm_ctx_sarea_list *ctx_sarea;
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
if (dev->max_context < 0
|| request.ctx_id >= (unsigned)dev->max_context) {
ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
if (!ctx_sarea) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
map = dev->context_sareas[request.ctx_id];
map = ctx_sarea->map;
mutex_unlock(&dev->struct_mutex);
request.handle = NULL;
list_for_each_entry(_entry, &dev->maplist->head,head) {
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request.handle =
(void *)(unsigned long)_entry->user_token;
@ -268,15 +227,14 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list = NULL;
struct list_head *list;
struct drm_ctx_sarea_list *ctx_sarea;
if (copy_from_user(&request,
(drm_ctx_priv_map_t __user *) arg, sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long) request.handle)
goto found;
@ -289,11 +247,15 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
map = r_list->map;
if (!map)
goto bad;
if (dev->max_context < 0)
mutex_lock(&dev->struct_mutex);
ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
if (!ctx_sarea) {
goto bad;
if (request.ctx_id >= (unsigned)dev->max_context)
goto bad;
dev->context_sareas[request.ctx_id] = map;
}
ctx_sarea->map = map;
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -449,7 +411,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
ctx_entry->tag = priv;
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist->head);
list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
@ -575,10 +537,10 @@ int drm_rmctx(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist->head)) {
if (!list_empty(&dev->ctxlist)) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx.handle) {
list_del(&pos->head);
drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);


@ -44,82 +44,38 @@ int drm_adddraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
unsigned long irqflags;
int i, j;
u32 *bitfield = dev->drw_bitfield;
unsigned int bitfield_length = dev->drw_bitfield_length;
drm_drawable_info_t **info = dev->drw_info;
unsigned int info_length = dev->drw_info_length;
struct drm_drawable_list *draw_info;
drm_draw_t draw;
int new_id = 0;
int ret;
for (i = 0, j = 0; i < bitfield_length; i++) {
if (bitfield[i] == ~0)
continue;
draw_info = drm_calloc(1, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
if (!draw_info)
return -ENOMEM;
for (; j < 8 * sizeof(*bitfield); j++)
if (!(bitfield[i] & (1 << j)))
goto done;
again:
if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
return -ENOMEM;
}
done:
if (i == bitfield_length) {
bitfield_length++;
bitfield = drm_alloc(bitfield_length * sizeof(*bitfield),
DRM_MEM_BUFS);
if (!bitfield) {
DRM_ERROR("Failed to allocate new drawable bitfield\n");
return DRM_ERR(ENOMEM);
}
if (8 * sizeof(*bitfield) * bitfield_length > info_length) {
info_length += 8 * sizeof(*bitfield);
info = drm_alloc(info_length * sizeof(*info),
DRM_MEM_BUFS);
if (!info) {
DRM_ERROR("Failed to allocate new drawable info"
" array\n");
drm_free(bitfield,
bitfield_length * sizeof(*bitfield),
DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
}
}
bitfield[i] = 0;
}
draw.handle = i * 8 * sizeof(*bitfield) + j + 1;
DRM_DEBUG("%d\n", draw.handle);
spin_lock_irqsave(&dev->drw_lock, irqflags);
bitfield[i] |= 1 << j;
info[draw.handle - 1] = NULL;
if (bitfield != dev->drw_bitfield) {
memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length *
sizeof(*bitfield));
drm_free(dev->drw_bitfield, sizeof(*bitfield) *
dev->drw_bitfield_length, DRM_MEM_BUFS);
dev->drw_bitfield = bitfield;
dev->drw_bitfield_length = bitfield_length;
ret = idr_get_new_above(&dev->drw_idr, draw_info, 1, &new_id);
if (ret == -EAGAIN) {
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
goto again;
}
if (info != dev->drw_info) {
memcpy(info, dev->drw_info, dev->drw_info_length *
sizeof(*info));
drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length,
DRM_MEM_BUFS);
dev->drw_info = info;
dev->drw_info_length = info_length;
}
list_add(&draw_info->head, &dev->drwlist);
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
draw_info->id = new_id;
draw.handle = new_id;
DRM_DEBUG("%d\n", draw.handle);
DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));
return 0;
@ -132,87 +88,25 @@ int drm_rmdraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_draw_t draw;
int id, idx;
unsigned int shift;
unsigned long irqflags;
u32 *bitfield = dev->drw_bitfield;
unsigned int bitfield_length = dev->drw_bitfield_length;
drm_drawable_info_t **info = dev->drw_info;
unsigned int info_length = dev->drw_info_length;
struct drm_drawable_list *draw_info;
DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
sizeof(draw));
id = draw.handle - 1;
idx = id / (8 * sizeof(*bitfield));
shift = id % (8 * sizeof(*bitfield));
if (idx < 0 || idx >= bitfield_length ||
!(bitfield[idx] & (1 << shift))) {
draw_info = idr_find(&dev->drw_idr, draw.handle);
if (!draw_info) {
DRM_DEBUG("No such drawable %d\n", draw.handle);
return 0;
return -EINVAL;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
bitfield[idx] &= ~(1 << shift);
list_del(&draw_info->head);
idr_remove(&dev->drw_idr, draw.handle);
drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
if (info[id]) {
drm_free(info[id]->rects, info[id]->num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
drm_free(info[id], sizeof(**info), DRM_MEM_BUFS);
}
/* Can we shrink the arrays? */
if (idx == bitfield_length - 1) {
while (idx >= 0 && !bitfield[idx])
--idx;
bitfield_length = idx + 1;
if (idx != id / (8 * sizeof(*bitfield)))
bitfield = drm_alloc(bitfield_length *
sizeof(*bitfield), DRM_MEM_BUFS);
if (!bitfield && bitfield_length) {
bitfield = dev->drw_bitfield;
bitfield_length = dev->drw_bitfield_length;
}
}
if (bitfield != dev->drw_bitfield) {
info_length = 8 * sizeof(*bitfield) * bitfield_length;
info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS);
if (!info && info_length) {
info = dev->drw_info;
info_length = dev->drw_info_length;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
memcpy(bitfield, dev->drw_bitfield, bitfield_length *
sizeof(*bitfield));
drm_free(dev->drw_bitfield, sizeof(*bitfield) *
dev->drw_bitfield_length, DRM_MEM_BUFS);
dev->drw_bitfield = bitfield;
dev->drw_bitfield_length = bitfield_length;
if (info != dev->drw_info) {
memcpy(info, dev->drw_info, info_length *
sizeof(*info));
drm_free(dev->drw_info, sizeof(*info) *
dev->drw_info_length, DRM_MEM_BUFS);
dev->drw_info = info;
dev->drw_info_length = info_length;
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
}
DRM_DEBUG("%d\n", draw.handle);
return 0;
}
@ -220,36 +114,22 @@ int drm_rmdraw(DRM_IOCTL_ARGS)
int drm_update_drawable_info(DRM_IOCTL_ARGS) {
DRM_DEVICE;
drm_update_draw_t update;
unsigned int id, idx, shift, bitfield_length = dev->drw_bitfield_length;
u32 *bitfield = dev->drw_bitfield;
unsigned long irqflags;
drm_drawable_info_t *info;
drm_clip_rect_t *rects;
struct drm_drawable_list *draw_info;
int err;
DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
sizeof(update));
id = update.handle - 1;
idx = id / (8 * sizeof(*bitfield));
shift = id % (8 * sizeof(*bitfield));
if (idx < 0 || idx >= bitfield_length ||
!(bitfield[idx] & (1 << shift))) {
draw_info = idr_find(&dev->drw_idr, update.handle);
if (!draw_info) {
DRM_ERROR("No such drawable %d\n", update.handle);
return DRM_ERR(EINVAL);
}
info = dev->drw_info[id];
if (!info) {
info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS);
if (!info) {
DRM_ERROR("Failed to allocate drawable info memory\n");
return DRM_ERR(ENOMEM);
}
}
info = &draw_info->info;
switch (update.type) {
case DRM_DRAWABLE_CLIPRECTS:
@ -284,12 +164,11 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
info->rects = rects;
info->num_rects = update.num;
dev->drw_info[id] = info;
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("Updated %d cliprects for drawable %d\n",
info->num_rects, id);
info->num_rects, update.handle);
break;
default:
DRM_ERROR("Invalid update type %d\n", update.type);
@ -299,11 +178,9 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
return 0;
error:
if (!dev->drw_info[id])
drm_free(info, sizeof(*info), DRM_MEM_BUFS);
else if (rects != dev->drw_info[id]->rects)
drm_free(rects, update.num *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
if (rects != info->rects)
drm_free(rects, update.num * sizeof(drm_clip_rect_t),
DRM_MEM_BUFS);
return err;
}
@ -312,19 +189,13 @@ error:
* Caller must hold the drawable spinlock!
*/
drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
u32 *bitfield = dev->drw_bitfield;
unsigned int idx, shift;
id--;
idx = id / (8 * sizeof(*bitfield));
shift = id % (8 * sizeof(*bitfield));
if (idx < 0 || idx >= dev->drw_bitfield_length ||
!(bitfield[idx] & (1 << shift))) {
struct drm_drawable_list *draw_info;
draw_info = idr_find(&dev->drw_idr, id);
if (!draw_info) {
DRM_DEBUG("No such drawable %d\n", id);
return NULL;
}
return dev->drw_info[id];
return &draw_info->info;
}
EXPORT_SYMBOL(drm_get_drawable_info);
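
Since the lookup is exported for drivers and, per the comment above, must run under the drawable spinlock, a call site looks roughly like this (drawable_id stands for whatever handle userspace supplied):

        unsigned long irqflags;
        drm_drawable_info_t *info;

        spin_lock_irqsave(&dev->drw_lock, irqflags);
        info = drm_get_drawable_info(dev, drawable_id);
        if (info)
                DRM_DEBUG("drawable %d has %d cliprects\n",
                          drawable_id, info->num_rects);
        spin_unlock_irqrestore(&dev->drw_lock, irqflags);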


@ -140,8 +140,9 @@ static drm_ioctl_desc_t drm_ioctls[] = {
int drm_lastclose(drm_device_t * dev)
{
drm_magic_entry_t *pt, *next;
drm_map_list_t *r_list;
drm_vma_entry_t *vma, *vma_next;
drm_map_list_t *r_list, *list_t;
drm_vma_entry_t *vma, *vma_temp;
struct drm_drawable_list *drw_entry, *drw_temp;
int i;
DRM_DEBUG("\n");
@ -166,15 +167,13 @@ int drm_lastclose(drm_device_t * dev)
drm_irq_uninstall(dev);
/* Free drawable information memory */
for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
i++) {
drm_drawable_info_t *info = drm_get_drawable_info(dev, i);
list_for_each_entry_safe(drw_entry, drw_temp, &dev->drwlist, head) {
drm_free(drw_entry->info.rects, drw_entry->info.num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
if (info) {
drm_free(info->rects, info->num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
drm_free(info, sizeof(*info), DRM_MEM_BUFS);
}
idr_remove(&dev->drw_idr, drw_entry->id);
list_del(&drw_entry->head);
drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
}
mutex_lock(&dev->struct_mutex);
@ -198,19 +197,17 @@ int drm_lastclose(drm_device_t * dev)
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
drm_agp_mem_t *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
drm_agp_release(dev);
@ -224,20 +221,14 @@ int drm_lastclose(drm_device_t * dev)
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
if (dev->maplist) {
while (!list_empty(&dev->maplist->head)) {
struct list_head *list = dev->maplist->head.next;
r_list = list_entry(list, drm_map_list_t, head);
drm_rmmap_locked(dev, r_list->map);
}
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@ -372,13 +363,9 @@ static void drm_cleanup(drm_device_t * dev)
drm_lastclose(dev);
drm_fence_manager_takedown(dev);
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
}
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
if (!drm_fb_loaded)
pci_disable_device(dev->pdev);
@ -594,7 +581,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
ioctl = &drm_ioctls[nr];
else
@ -622,3 +609,17 @@ err_i1:
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
drm_map_list_t *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && entry->map->type == _DRM_SHM &&
(entry->map->flags & _DRM_CONTAINS_LOCK)) {
return entry->map;
}
}
return NULL;
}
EXPORT_SYMBOL(drm_getsarea);


@ -1,8 +1,8 @@
/**************************************************************************
*
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@ -10,17 +10,17 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
@ -49,8 +49,6 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class,
int is_exe = (type & DRM_FENCE_TYPE_EXE);
int ge_last_exe;
diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
@ -59,13 +57,13 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class,
diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
ge_last_exe = diff < driver->wrap_diff;
if (ge_last_exe)
if (ge_last_exe)
fc->pending_flush &= ~type;
if (is_exe && ge_last_exe) {
fc->last_exe_flush = sequence;
}
if (list_empty(&fc->ring))
return;
@ -107,9 +105,8 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class,
fence->base.hash.key);
list_del_init(&fence->ring);
}
}
if (wake) {
DRM_WAKEUP(&fc->fence_queue);
}
@ -266,7 +263,7 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
}
write_unlock_irqrestore(&fm->lock, flags);
mutex_lock(&dev->struct_mutex);
read_lock_irqsave(&fm->lock, flags);
@ -295,17 +292,21 @@ static int drm_fence_lazy_wait(drm_device_t *dev,
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
int signaled;
unsigned long _end = jiffies + 3*DRM_HZ;
int ret = 0;
do {
DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
fence_signaled(dev, fence, mask, 0));
(signaled = fence_signaled(dev, fence, mask, 1)));
if (signaled)
return 0;
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
if (fence_signaled(dev, fence, mask, 0))
return 0;
if (time_after_eq(jiffies, _end))
ret = -EBUSY;
if (ret) {
if (ret == -EBUSY) {
@ -409,7 +410,7 @@ int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
fence->signaled = 0x00;
fence->sequence = sequence;
fence->native_type = native_type;
if (list_empty(&fc->ring))
if (list_empty(&fc->ring))
fc->last_exe_flush = sequence - 1;
list_add_tail(&fence->ring, &fc->ring);
write_unlock_irqrestore(&fm->lock, flags);
@ -494,8 +495,7 @@ void drm_fence_manager_init(drm_device_t * dev)
drm_fence_driver_t *fed = dev->driver->fence_driver;
int i;
fm->lock = RW_LOCK_UNLOCKED;
rwlock_init(&fm->lock);
write_lock(&fm->lock);
fm->initialized = 0;
if (!fed)
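
The lazy-wait rework above is easier to read assembled: it re-arms DRM_WAIT_ON until the fence reports signaled, keeps its own jiffies deadline, and only keeps looping on -EINTR when the caller asked to ignore signals. A minimal sketch of that shape, where my_fence_check() stands in for the driver's fence_signaled() test (the real path also re-checks once with flushing disabled before settling on -EBUSY):

static int lazy_wait_sketch(drm_device_t *dev, drm_fence_class_manager_t *fc,
                            drm_fence_object_t *fence, uint32_t mask,
                            int ignore_signals)
{
        unsigned long _end = jiffies + 3 * DRM_HZ;
        int signaled, ret = 0;

        do {
                /* Sleep on the fence queue; the condition is re-evaluated
                 * on every wake-up. */
                DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
                            (signaled = my_fence_check(dev, fence, mask)));
                if (signaled)
                        return 0;
                if (time_after_eq(jiffies, _end))
                        break;
        } while (ret == -EINTR && ignore_signals);

        if (time_after_eq(jiffies, _end) && ret != 0)
                ret = -EBUSY;   /* deadline passed without the fence signaling */
        return ret;
}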

View File

@ -79,13 +79,6 @@ static int drm_setup(drm_device_t * dev)
drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&dev->magicfree);
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
if (dev->ctxlist == NULL)
return -ENOMEM;
memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
INIT_LIST_HEAD(&dev->ctxlist->head);
dev->vmalist = NULL;
dev->sigdata.lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
@ -268,6 +261,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->user_objects);
INIT_LIST_HEAD(&priv->refd_objects);
@ -291,19 +285,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
if (!dev->file_last) {
priv->next = NULL;
priv->prev = NULL;
dev->file_first = priv;
dev->file_last = priv;
/* first opener automatically becomes master */
if (list_empty(&dev->filelist))
priv->master = 1;
} else {
priv->next = NULL;
priv->prev = dev->file_last;
dev->file_last->next = priv;
dev->file_last = priv;
}
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->struct_mutex);
#ifdef __alpha__
@ -480,10 +465,10 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->ctxlist_mutex);
if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
if (!list_empty(&dev->ctxlist)) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->tag == priv &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
@ -503,22 +488,12 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->struct_mutex);
drm_object_release(filp);
if (priv->remove_auth_on_close == 1) {
drm_file_t *temp = dev->file_first;
while (temp) {
drm_file_t *temp;
list_for_each_entry(temp, &dev->filelist, lhead)
temp->authenticated = 0;
temp = temp->next;
}
}
if (priv->prev) {
priv->prev->next = priv->next;
} else {
dev->file_first = priv->next;
}
if (priv->next) {
priv->next->prev = priv->prev;
} else {
dev->file_last = priv->prev;
}
list_del(&priv->lhead);
mutex_unlock(&dev->struct_mutex);
if (dev->driver->postclose)
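
The drm_fops.c hunks above are one instance of a pattern repeated throughout this merge: the hand-rolled file_first/file_last pointer chain becomes a kernel list_head embedded in drm_file_t. Schematically (a sketch of the idiom, not the exact tree code; priv and the lhead member are as in the hunks above):

drm_file_t *temp;

/* One-time init, done where the device structure is filled in. */
INIT_LIST_HEAD(&dev->filelist);

/* Open: the first opener becomes master, then the file is linked in. */
if (list_empty(&dev->filelist))
        priv->master = 1;
list_add(&priv->lhead, &dev->filelist);

/* Walking every open file no longer needs manual next pointers. */
list_for_each_entry(temp, &dev->filelist, lhead)
        temp->authenticated = 0;

/* Release: a single unlink replaces the four-way prev/next surgery. */
list_del(&priv->lhead);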

View File

@ -199,7 +199,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
}
i = 0;
list_for_each(list, &dev->maplist->head) {
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, drm_map_list_t, head);
break;
@ -252,12 +252,18 @@ int drm_getclient(struct inode *inode, struct file *filp,
return -EFAULT;
idx = client.idx;
mutex_lock(&dev->struct_mutex);
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
if (!pt) {
if (list_empty(&dev->filelist)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
i = 0;
list_for_each_entry(pt, &dev->filelist, lhead) {
if (i++ >= idx)
break;
}
client.auth = pt->authenticated;
client.pid = pt->pid;
client.uid = pt->uid;

View File

@ -119,8 +119,8 @@ static int drm_irq_install(drm_device_t * dev)
spin_lock_init(&dev->vbl_lock);
INIT_LIST_HEAD(&dev->vbl_sigs.head);
INIT_LIST_HEAD(&dev->vbl_sigs2.head);
INIT_LIST_HEAD(&dev->vbl_sigs);
INIT_LIST_HEAD(&dev->vbl_sigs2);
dev->vbl_pending = 0;
}
@ -290,7 +290,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
? &dev->vbl_sigs2 : &dev->vbl_sigs;
drm_vbl_sig_t *vbl_sig;
@ -300,7 +300,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
list_for_each_entry(vbl_sig, vbl_sigs, head) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current) {
@ -334,7 +334,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);
list_add_tail(&vbl_sig->head, vbl_sigs);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@ -377,20 +377,18 @@ void drm_vbl_send_signals(drm_device_t * dev)
spin_lock_irqsave(&dev->vbl_lock, flags);
for (i = 0; i < 2; i++) {
struct list_head *list, *tmp;
drm_vbl_sig_t *vbl_sig;
drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
drm_vbl_sig_t *vbl_sig, *tmp;
struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
&dev->vbl_received);
list_for_each_safe(list, tmp, &vbl_sigs->head) {
vbl_sig = list_entry(list, drm_vbl_sig_t, head);
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
list_del(list);
list_del(&vbl_sig->head);
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);
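
Worth noting in the hunk above: the walk switches to list_for_each_entry_safe() precisely because entries are freed inside the loop body. A small sketch of the idiom (done_with() is a placeholder predicate, not a DRM function):

drm_vbl_sig_t *vbl_sig, *tmp;

/* The _safe variant caches the next element up front, so list_del()
 * on the current one cannot break the iteration. */
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
        if (done_with(vbl_sig)) {
                list_del(&vbl_sig->head);
                drm_free(vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER);
        }
}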

View File

@ -47,7 +47,7 @@ static struct {
static inline size_t drm_size_align(size_t size) {
register size_t tmpSize = 4;
size_t tmpSize = 4;
if (size > PAGE_SIZE)
return PAGE_ALIGN(size);
@ -228,7 +228,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
offset -= dev->hose->mem_space->start;
#endif
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
list_for_each_entry(agpmem, &dev->agp->memory, head)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))

View File

@ -240,11 +240,17 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
{
drm_device_t *dev = priv->head->dev;
drm_user_object_t *uo;
drm_hash_item_t *hash;
int ret;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, user_token);
if (!uo || (uo->type != type)) {
ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
if (ret) {
DRM_ERROR("Could not find user object to reference.\n");
goto out_err;
}
uo = drm_hash_entry(hash, drm_user_object_t, hash);
if (uo->type != type) {
ret = -EINVAL;
goto out_err;
}
@ -253,7 +259,6 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
goto out_err;
mutex_unlock(&dev->struct_mutex);
*object = uo;
DRM_ERROR("Referenced an object\n");
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
@ -281,7 +286,6 @@ int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
}
drm_remove_ref_object(priv, ro);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("Unreferenced an object\n");
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
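
The reworked reference path above looks the token up straight in dev->object_hash and then converts the embedded hash item back into its containing object; drm_hash_entry() is the usual container_of idiom. A condensed sketch of the lookup pair:

drm_hash_item_t *hash;
drm_user_object_t *uo = NULL;

if (drm_ht_find_item(&dev->object_hash, user_token, &hash) == 0)
        /* Resolve the embedded drm_hash_item_t back to the structure that
         * contains it (effectively container_of(hash, ..., hash)). */
        uo = drm_hash_entry(hash, drm_user_object_t, hash);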

View File

@ -29,8 +29,7 @@
*/
#ifndef _DRM_OBJECTS_H
#define _DRM_OJBECTS_H
#define DRM_HAS_TTM
#define _DRM_OBJECTS_H
struct drm_device;
@ -234,10 +233,8 @@ extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
typedef struct drm_ttm_backend {
void *private;
uint32_t flags;
uint32_t drm_map_type;
struct drm_ttm_backend;
typedef struct drm_ttm_backend_func {
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
@ -246,6 +243,13 @@ typedef struct drm_ttm_backend {
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_func_t;
typedef struct drm_ttm_backend {
uint32_t flags;
int mem_type;
drm_ttm_backend_func_t *func;
} drm_ttm_backend_t;
typedef struct drm_ttm {
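
The drm_objects.h change above splits the TTM backend into per-instance state (flags, mem_type) and a shared operations table reached through be->func; the drm_ttm.c hunks further down convert every call site to dispatch through it. A rough sketch of how a driver backend would wire this up (the my_* names and the allocator are illustrative, not code from this tree):

/* One static ops table, shared by every backend instance of a driver. */
static drm_ttm_backend_func_t my_backend_func = {
        .needs_ub_cache_adjust = my_needs_ub_cache_adjust,
        .populate              = my_populate,
        .bind                  = my_bind,
        .unbind                = my_unbind,
        .destroy               = my_destroy,
};

drm_ttm_backend_t *my_create_backend(void)
{
        drm_ttm_backend_t *be = kzalloc(sizeof(*be), GFP_KERNEL);

        if (be)
                be->func = &my_backend_func;   /* state per instance, ops shared */
        return be;
}

Sharing one static table keeps every backend object small, and callers such as drm_bind_ttm() simply dispatch with be->func->bind(be, aper_offset, cached), as the drm_ttm.c hunks below show.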

View File

@ -119,24 +119,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
/**
* Get the pointer to the SAREA.
*
* Searches the SAREA on the mapping lists and points drm_device::sarea to it.
*/
#define DRM_GETSAREA() \
do { \
drm_map_list_t *entry; \
list_for_each_entry( entry, &dev->maplist->head, head ) { \
if ( entry->map && \
entry->map->type == _DRM_SHM && \
(entry->map->flags & _DRM_CONTAINS_LOCK) ) { \
dev_priv->sarea = entry->map; \
break; \
} \
} \
} while (0)
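
The DRM_GETSAREA() macro deleted above is superseded by a drm_getsarea(dev) helper, which the i915, mach64, mga, r128 and radeon hunks later in this diff switch to. Assuming it does what the macro body did, walk the (now list_head based) dev->maplist for the shared-memory map carrying the lock, a sketch looks like:

/* Sketch of a drm_getsarea()-style lookup (not the exact in-tree body):
 * the SAREA is the _DRM_SHM mapping created with _DRM_CONTAINS_LOCK. */
static drm_map_t *getsarea_sketch(drm_device_t *dev)
{
        drm_map_list_t *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                if (entry->map &&
                    entry->map->type == _DRM_SHM &&
                    (entry->map->flags & _DRM_CONTAINS_LOCK))
                        return entry->map;
        }
        return NULL;
}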
#define DRM_HZ HZ
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \

View File

@ -51,10 +51,8 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
#if 1
unsigned long addr;
size_t sz;
#endif
#ifdef DRM_DEBUG_MEMORY
int area = DRM_MEM_DMA;

View File

@ -211,7 +211,6 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int len = 0;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
@ -231,9 +230,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
if (dev->maplist != NULL)
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;
@ -242,10 +239,10 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
else
type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags,
(unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
@ -253,7 +250,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
i++;
}
}
if (len > request + offset)
return request;
@ -535,7 +532,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
*eof = 0;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
for (priv = dev->file_first; priv; priv = priv->next) {
list_for_each_entry(priv, &dev->filelist, lhead) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor,
@ -588,7 +585,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
for (pt = dev->vmalist; pt; pt = pt->next) {
list_for_each_entry(pt, &dev->vmalist, head) {
if (!(vma = pt->vma))
continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",

View File

@ -60,6 +60,12 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
{
int retcode;
INIT_LIST_HEAD(&dev->drwlist);
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
@ -70,6 +76,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
mutex_init(&dev->bm.init_mutex);
mutex_init(&dev->bm.evict_mutex);
idr_init(&dev->drw_idr);
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
@ -80,28 +88,20 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
dev->irq = pdev->irq;
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
return -ENOMEM;
}
if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
return -ENOMEM;
}
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
if (dev->maplist == NULL)
return -ENOMEM;
INIT_LIST_HEAD(&dev->maplist->head);
/* the DRM has 6 counters */
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;

View File

@ -154,7 +154,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
be = ttm->be;
if (be) {
be->destroy(be);
be->func->destroy(be);
ttm->be = NULL;
}
@ -222,7 +222,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
if (!page)
return -ENOMEM;
}
be->populate(be, ttm->num_pages, ttm->pages);
be->func->populate(be, ttm->num_pages, ttm->pages);
ttm->state = ttm_unbound;
return 0;
}
@ -281,7 +281,7 @@ void drm_ttm_evict(drm_ttm_t * ttm)
int ret;
if (ttm->state == ttm_bound) {
ret = be->unbind(be);
ret = be->func->unbind(be);
BUG_ON(ret);
}
@ -293,7 +293,7 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm)
if (ttm->state == ttm_evicted) {
drm_ttm_backend_t *be = ttm->be;
if (be->needs_ub_cache_adjust(be)) {
if (be->func->needs_ub_cache_adjust(be)) {
drm_set_caching(ttm, 0);
}
ttm->state = ttm_unbound;
@ -329,7 +329,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
if ((ret = be->bind(be, aper_offset, cached))) {
if ((ret = be->func->bind(be, aper_offset, cached))) {
ttm->state = ttm_evicted;
DRM_ERROR("Couldn't bind backend.\n");
return ret;

View File

@ -122,7 +122,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
/*
* It's AGP memory - find the real physical page to map
*/
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
list_for_each_entry(agpmem, &dev->agp->memory, head) {
if (agpmem->bound <= baddr &&
agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
break;
@ -205,10 +205,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vma_entry_t *pt, *prev, *next;
drm_vma_entry_t *pt, *temp;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
int found_maps = 0;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@ -218,19 +217,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
map = vma->vm_private_data;
mutex_lock(&dev->struct_mutex);
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma->vm_private_data == map)
found_maps++;
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
} else {
prev = pt;
}
}
/* We were the only map that was found */
@ -239,9 +231,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
* we delete this mappings information.
*/
found_maps = 0;
list = &dev->maplist->head;
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map == map)
found_maps++;
}
@ -439,9 +429,8 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
vma_entry->vma = vma;
vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
list_add(&vma_entry->head, &dev->vmalist);
}
}
@ -467,20 +456,16 @@ static void drm_vm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vma_entry_t *pt, *prev;
drm_vma_entry_t *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_dec(&dev->vma_count);
mutex_lock(&dev->struct_mutex);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
@ -516,8 +501,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
@ -739,7 +723,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
int ret = NOPFN_REFAULT;
unsigned long ret = NOPFN_REFAULT;
if (address > vma->vm_end)
return NOPFN_SIGBUS;
@ -796,7 +780,9 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
goto out_unlock;
}
pfn = page_to_pfn(page);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
drm_io_prot(_DRM_TTM, vma);
}
err = vm_insert_pfn(vma, address, pfn);

View File

@ -346,12 +346,10 @@ static int i810_dma_initialize(drm_device_t * dev,
drm_i810_private_t * dev_priv,
drm_i810_init_t * init)
{
struct list_head *list;
drm_map_list_t *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK) {

View File

@ -35,7 +35,7 @@
drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
{
return drm_agp_init_ttm(dev, NULL);
return drm_agp_init_ttm(dev);
}
int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)

View File

@ -93,7 +93,7 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
.firstopen = radeon_driver_firstopen,
@ -103,6 +103,7 @@ static struct drm_driver driver = {
.lastclose = radeon_driver_lastclose,
.unload = radeon_driver_unload,
.vblank_wait = radeon_driver_vblank_wait,
.vblank_wait2 = radeon_driver_vblank_wait2,
.dri_library_name = dri_library_name,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,

View File

@ -233,7 +233,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev)
drm_map_list_t *entry;
drm_local_map_t *map;
list_for_each_entry(entry, &dev->maplist->head, head) {
list_for_each_entry(entry, &dev->maplist, head) {
map = entry->map;
if (!map)
continue;

View File

@ -34,7 +34,7 @@
drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev)
{
return drm_agp_init_ttm(dev, NULL);
return drm_agp_init_ttm(dev);
}
int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)

View File

@ -48,4 +48,10 @@ do
unifdef -D__linux__ -DI915_HAVE_FENCE -DI915_HAVE_BUFFER $i > $i.tmp
mv $i.tmp $i
done
for i in drm*.[ch]
do
unifdef -UDRM_ODD_MM_COMPAT -D__linux__ $i > $i.tmp
mv $i.tmp $i
done
cd -

View File

@ -80,14 +80,7 @@
#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(IN_MODULE)
/* Prevent name collision when including sys/ioccom.h */
#undef ioctl
#include <sys/ioccom.h>
#define ioctl(a,b,c) xf86ioctl(a,b,c)
#else
#include <sys/ioccom.h>
#endif /* __FreeBSD__ && xf86ioctl */
#define DRM_IOCTL_NR(n) ((n) & 0xff)
#define DRM_IOC_VOID IOC_VOID
#define DRM_IOC_READ IOC_OUT
@ -796,7 +789,8 @@ typedef struct drm_fence_arg {
typedef enum {
drm_bo_type_dc,
drm_bo_type_user,
drm_bo_type_fake
drm_bo_type_fake,
drm_bo_type_kernel, /* for initial kernel allocations */
}drm_bo_type_t;

View File

@ -98,12 +98,20 @@
0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP"
0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP"
0x1002 0x5954 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI RS480 XPRESS 200G"
0x1002 0x5955 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5955"
0x1002 0x5974 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS482 XPRESS 200"
0x1002 0x5975 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS485 XPRESS 1100 IGP"
0x1002 0x5960 CHIP_RV280 "ATI Radeon RV280 9250"
0x1002 0x5961 CHIP_RV280 "ATI Radeon RV280 9200"
0x1002 0x5962 CHIP_RV280 "ATI Radeon RV280 9200"
0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE"
0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI"
0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50"
0x1002 0x5a41 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS400 XPRESS 200"
0x1002 0x5a42 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS400 XPRESS 200M"
0x1002 0x5a61 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200"
0x1002 0x5a62 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200M"
0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE"
0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro"
0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550"
@ -186,7 +194,6 @@
0x1002 0x4c51 0 "3D Rage LT Pro"
0x1002 0x4c42 0 "3D Rage LT Pro AGP-133"
0x1002 0x4c44 0 "3D Rage LT Pro AGP-66"
0x1002 0x4759 0 "Rage 3D IICATI 3D RAGE IIC AGP(A12/A13)"
0x1002 0x474c 0 "Rage XC"
0x1002 0x474f 0 "Rage XL"
0x1002 0x4752 0 "Rage XL"
@ -275,11 +282,13 @@
0x8086 0x2592 CHIP_I9XX|CHIP_I915 "Intel i915GM"
0x8086 0x2772 CHIP_I9XX|CHIP_I915 "Intel i945G"
0x8086 0x27A2 CHIP_I9XX|CHIP_I915 "Intel i945GM"
0x8086 0x27AE CHIP_I9XX|CHIP_I915 "Intel i945GME"
0x8086 0x2972 CHIP_I9XX|CHIP_I965 "Intel i946GZ"
0x8086 0x2982 CHIP_I9XX|CHIP_I965 "Intel i965G"
0x8086 0x2992 CHIP_I9XX|CHIP_I965 "Intel i965Q"
0x8086 0x29A2 CHIP_I9XX|CHIP_I965 "Intel i965G"
0x8086 0x2A02 CHIP_I9XX|CHIP_I965 "Intel i965GM"
0x8086 0x2A12 CHIP_I9XX|CHIP_I965 "Intel i965GME/GLE"
[imagine]
0x105d 0x2309 IMAGINE_128 "Imagine 128"
@ -357,7 +366,7 @@
0x10DE 0x0309 NV30 "NVidia Quadro FX 1000"
0x10DE 0x0311 NV30 "NVidia GeForce FX 5600 Ultra"
0x10DE 0x0312 NV30 "NVidia GeForce FX 5600"
0x10DE 0x0313 NV30 "NVidia 0x0313"},
0x10DE 0x0313 NV30 "NVidia 0x0313"
0x10DE 0x0314 NV30 "NVidia GeForce FX 5600SE"
0x10DE 0x0316 NV30 "NVidia 0x0316"
0x10DE 0x0317 NV30 "NVidia 0x0317"

View File

@ -35,7 +35,8 @@
dev->pci_device == 0x2982 || \
dev->pci_device == 0x2992 || \
dev->pci_device == 0x29A2 || \
dev->pci_device == 0x2A02)
dev->pci_device == 0x2A02 || \
dev->pci_device == 0x2A12)
/* Really want an OS-independent resettable timer. Would like to have
@ -123,7 +124,7 @@ static int i915_initialize(drm_device_t * dev,
{
memset(dev_priv, 0, sizeof(drm_i915_private_t));
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
dev->dev_private = (void *)dev_priv;
@ -177,6 +178,10 @@ static int i915_initialize(drm_device_t * dev,
*/
dev_priv->allow_batchbuffer = 1;
/* Enable vblank on pipe A for older X servers
*/
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
/* Program Hardware Status Page */
dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
0xffffffff);
@ -467,7 +472,9 @@ int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
drm_i915_cmdbuffer_t * cmd)
{
#ifdef I915_HAVE_FENCE
drm_i915_private_t *dev_priv = dev->dev_private;
#endif
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
@ -643,7 +650,6 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
dev_priv->sarea_priv;
drm_i915_batchbuffer_t batch;
@ -669,7 +675,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS)
ret = i915_dispatch_batchbuffer(dev, &batch);
sarea_priv->last_dispatch = (int)hw_status[5];
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return ret;
}
@ -677,7 +683,6 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
dev_priv->sarea_priv;
drm_i915_cmdbuffer_t cmdbuf;
@ -705,7 +710,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS)
return ret;
}
sarea_priv->last_dispatch = (int)hw_status[5];
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return 0;
}
@ -854,7 +859,7 @@ static int i915_mmio(DRM_IOCTL_ARGS)
return DRM_ERR(EINVAL);
e = &mmio_table[mmio.reg];
base = dev_priv->mmio_map->handle + e->offset;
base = (u8 *) dev_priv->mmio_map->handle + e->offset;
switch (mmio.read_write) {
case I915_MMIO_READ:

View File

@ -294,6 +294,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
(void) I915_READ16(I915REG_INT_IDENTITY_R);
DRM_READMEMORYBARRIER();
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
@ -713,22 +715,13 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
if (!dev_priv->vblank_pipe)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED;
dev_priv->user_irq_refcount = 0;
if (!dev_priv->vblank_pipe)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
i915_enable_interrupt(dev);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

View File

@ -792,8 +792,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
INIT_LIST_HEAD(&dev_priv->placeholders);
INIT_LIST_HEAD(&dev_priv->pending);
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
dev->dev_private = (void *)dev_priv;

View File

@ -551,7 +551,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
drm_map_list_t *_entry;
unsigned long agp_token = 0;
list_for_each_entry(_entry, &dev->maplist->head, head) {
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == dev->agp_buffer_map)
agp_token = _entry->user_token;
}
@ -830,8 +830,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
dev_priv->texture_offset = init->texture_offset[0];
dev_priv->texture_size = init->texture_size[0];
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("failed to find sarea!\n");
return DRM_ERR(EINVAL);

View File

@ -114,7 +114,7 @@ enum nouveau_card_type {
NV_10 =10,
NV_11 =10,
NV_15 =10,
NV_17 =10,
NV_17 =17,
NV_20 =20,
NV_25 =20,
NV_30 =30,

View File

@ -51,7 +51,7 @@ int nouveau_fifo_ctx_size(drm_device_t* dev)
if (dev_priv->card_type >= NV_40)
return 128;
else if (dev_priv->card_type >= NV_10)
else if (dev_priv->card_type >= NV_17)
return 64;
else
return 32;
@ -90,10 +90,12 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev)
break;
case NV_30:
case NV_20:
case NV_10:
case NV_17:
NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
(1 << 16) /* 64 Bytes entry*/);
/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
break;
case NV_10:
case NV_04:
case NV_03:
NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
@ -269,11 +271,12 @@ static void nouveau_nv10_context_init(drm_device_t *dev, int channel)
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_object *cb_obj;
uint32_t fifoctx;
int ctx_size = nouveau_fifo_ctx_size(dev);
int i;
cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64;
fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*ctx_size;
for (i=0;i<64;i+=4)
for (i=0;i<ctx_size;i+=4)
NV_WRITE(fifoctx + i, 0);
/* Fill entries that are seen filled in dumps of nvidia driver just
@ -327,6 +330,7 @@ static void nouveau_nv30_context_init(drm_device_t *dev, int channel)
RAMFC_WR(SEMAPHORE, NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
}
#if 0
static void nouveau_nv10_context_save(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
@ -350,6 +354,7 @@ static void nouveau_nv10_context_save(drm_device_t *dev)
RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV10_PFIFO_CACHE1_DMA_SUBROUTINE));
}
#endif
#undef RAMFC_WR
#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val))
@ -507,6 +512,7 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp)
nouveau_nv04_context_init(dev, channel);
break;
case NV_10:
case NV_17:
nv10_graph_context_create(dev, channel);
nouveau_nv10_context_init(dev, channel);
break;

View File

@ -372,6 +372,7 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev)
nouveau_nv04_context_switch(dev);
break;
case NV_10:
case NV_17:
nouveau_nv10_context_switch(dev);
break;
case NV_20:

View File

@ -248,6 +248,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
}
break;
case NV_10:
case NV_17:
case NV_20:
case NV_30:
case NV_40:
@ -521,6 +522,7 @@ nouveau_instmem_configure_fixed_tables(struct drm_device *dev)
break;
case NV_30:
case NV_20:
case NV_17:
case NV_10:
case NV_04:
case NV_03:

View File

@ -167,7 +167,7 @@ nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle,
while (NV_READ(ht_base + ofs) || NV_READ(ht_base + ofs + 4)) {
ofs += 8;
if (ofs == ht_end) ofs = ht_base;
if (ofs == dev_priv->ramht_size) ofs = 0;
if (ofs == o_ofs) {
DRM_ERROR("no free hash table entries\n");
return 1;
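
The one-line change above fixes the wrap test in the RAMHT probe: ofs is an offset from the start of the hash table, so wraparound has to compare against the table size rather than an absolute end address. Schematically (slot_used() and initial_hash_offset are placeholders for the NV_READ pair and the handle hash, not names from this tree):

uint32_t o_ofs, ofs;

o_ofs = ofs = initial_hash_offset;
/* Linear probe over 8-byte entries, wrapping on the table size. */
while (slot_used(dev, ht_base + ofs)) {
        ofs += 8;
        if (ofs == dev_priv->ramht_size)
                ofs = 0;                /* relative offset wraps back to 0 */
        if (ofs == o_ofs) {
                DRM_ERROR("no free hash table entries\n");
                return 1;               /* came back around: table is full */
        }
}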

View File

@ -65,6 +65,10 @@
#define NV03_PMC_ENABLE 0x00000200
# define NV_PMC_ENABLE_PFIFO (1<< 8)
# define NV_PMC_ENABLE_PGRAPH (1<<12)
/* Disabling the below bit breaks newer (G7X only?) mobile chipsets,
* the card will hang early on in the X init process.
*/
# define NV_PMC_ENABLE_UNK13 (1<<13)
#define NV40_PMC_1700 0x00001700
#define NV40_PMC_1704 0x00001704
#define NV40_PMC_1708 0x00001708

View File

@ -8,6 +8,11 @@ nv04_mc_init(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
/* Power up everything, resetting each individual unit will
* be done later if needed.
*/
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
NV_WRITE(NV03_PMC_INTR_EN_0, 0);
return 0;

View File

@ -186,7 +186,7 @@ static void nv10_praph_pipe(drm_device_t *dev) {
static int nv10_graph_ctx_regs [] = {
NV03_PGRAPH_XY_LOGIC_MISC0,
//NV10_PGRAPH_CTX_SWITCH1, make ctx switch crash
NV10_PGRAPH_CTX_SWITCH1,
NV10_PGRAPH_CTX_SWITCH2,
NV10_PGRAPH_CTX_SWITCH3,
NV10_PGRAPH_CTX_SWITCH4,
@ -527,6 +527,37 @@ NV10_PGRAPH_DEBUG_4,
0x00400a04,
};
static int nv10_graph_ctx_regs_find_offset(drm_device_t *dev, int reg)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i, j;
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) {
if (nv10_graph_ctx_regs[i] == reg)
return i;
}
if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) {
if (nv17_graph_ctx_regs[j] == reg)
return i;
}
}
return -1;
}
static void restore_ctx_regs(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *fifo = &dev_priv->fifos[channel];
int i, j;
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
NV_WRITE(nv10_graph_ctx_regs[i], fifo->pgraph_ctx[i]);
if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
NV_WRITE(nv17_graph_ctx_regs[j], fifo->pgraph_ctx[i]);
}
nouveau_wait_for_idle(dev);
}
void nouveau_nv10_context_switch(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
@ -559,15 +590,8 @@ void nouveau_nv10_context_switch(drm_device_t *dev)
nouveau_wait_for_idle(dev);
// restore PGRAPH context
//XXX not working yet
#if 1
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
NV_WRITE(nv10_graph_ctx_regs[i], dev_priv->fifos[channel].pgraph_ctx[i]);
if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
NV_WRITE(nv17_graph_ctx_regs[j], dev_priv->fifos[channel].pgraph_ctx[i]);
}
nouveau_wait_for_idle(dev);
restore_ctx_regs(dev, channel);
#endif
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
@ -582,20 +606,56 @@ void nouveau_nv10_context_switch(drm_device_t *dev)
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
#define NV_WRITE_CTX(reg, val) do { \
int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
if (offset > 0) \
fifo->pgraph_ctx[offset] = val; \
} while (0)
int nv10_graph_context_create(drm_device_t *dev, int channel) {
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *fifo = &dev_priv->fifos[channel];
uint32_t tmp, vramsz;
DRM_DEBUG("nv10_graph_context_create %d\n", channel);
memset(dev_priv->fifos[channel].pgraph_ctx, 0, sizeof(dev_priv->fifos[channel].pgraph_ctx));
memset(fifo->pgraph_ctx, 0, sizeof(fifo->pgraph_ctx));
//dev_priv->fifos[channel].pgraph_ctx_user = channel << 24;
dev_priv->fifos[channel].pgraph_ctx[0] = 0x0001ffff;
/* per channel init from ddx */
tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
/*XXX the original ddx code, does this in 2 steps :
* tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
* NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
* tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
* NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
*/
tmp |= 0x00020100;
NV_WRITE_CTX(NV10_PGRAPH_SURFACE, tmp);
vramsz = drm_get_resource_len(dev, 0) - 1;
NV_WRITE_CTX(NV04_PGRAPH_BOFFSET0, 0);
NV_WRITE_CTX(NV04_PGRAPH_BOFFSET1, 0);
NV_WRITE_CTX(NV04_PGRAPH_BLIMIT0 , vramsz);
NV_WRITE_CTX(NV04_PGRAPH_BLIMIT1 , vramsz);
NV_WRITE_CTX(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
NV_WRITE_CTX(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
/* is it really needed ??? */
if (dev_priv->chipset>=0x17) {
dev_priv->fifos[channel].pgraph_ctx[sizeof(nv10_graph_ctx_regs) + 0] = NV_READ(NV10_PGRAPH_DEBUG_4);
dev_priv->fifos[channel].pgraph_ctx[sizeof(nv10_graph_ctx_regs) + 1] = NV_READ(0x004006b0);
NV_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
NV_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
}
/* for the first channel init the regs */
if (dev_priv->fifo_alloc_count == 0)
restore_ctx_regs(dev, channel);
//XXX should be saved/restored for each fifo
//we supposed here we have X fifo and only one 3D fifo.
@ -606,7 +666,6 @@ int nv10_graph_context_create(drm_device_t *dev, int channel) {
int nv10_graph_init(drm_device_t *dev) {
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t tmp, vramsz;
int i;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
@ -637,23 +696,7 @@ int nv10_graph_init(drm_device_t *dev) {
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);
/* the below don't belong here, per-channel context state */
tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
vramsz = drm_get_resource_len(dev, 0) - 1;
NV_WRITE(NV04_PGRAPH_BOFFSET0, 0);
NV_WRITE(NV04_PGRAPH_BOFFSET1, 0);
NV_WRITE(NV04_PGRAPH_BLIMIT0 , vramsz);
NV_WRITE(NV04_PGRAPH_BLIMIT1 , vramsz);
NV_WRITE(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
NV_WRITE(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
return 0;
}
void nv10_graph_takedown(drm_device_t *dev)
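
The net effect of the nv10_graph.c rework above: per-channel PGRAPH state is no longer poked straight into the hardware at channel-create time. NV_WRITE_CTX() stores the value in the channel's pgraph_ctx[] slot found by nv10_graph_ctx_regs_find_offset(), and restore_ctx_regs() replays that array into the registers on a context switch (or immediately for the very first channel). In miniature:

struct nouveau_fifo *fifo = &dev_priv->fifos[channel];
int i;

/* Creation time: record the value in the software copy only. */
NV_WRITE_CTX(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);

/* Switch time: replay the whole saved array into PGRAPH. */
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
        NV_WRITE(nv10_graph_ctx_regs[i], fifo->pgraph_ctx[i]);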

View File

@ -117,7 +117,7 @@ void nouveau_nv20_context_switch(drm_device_t *dev)
nouveau_wait_for_idle(dev);
NV_WRITE(NV03_PGRAPH_CTX_CONTROL, 0x10000000);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
nv20_graph_context_restore(dev, channel);
@ -126,7 +126,7 @@ void nouveau_nv20_context_switch(drm_device_t *dev)
if ((NV_READ(NV10_PGRAPH_CTX_USER) >> 24) != channel)
DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", channel, NV_READ(NV10_PGRAPH_CTX_USER) >> 24);
NV_WRITE(NV03_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
@ -194,7 +194,7 @@ int nv20_graph_init(drm_device_t *dev) {
NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
}
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);

View File

@ -182,7 +182,7 @@ int nv30_graph_init(drm_device_t *dev)
NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
}
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);

View File

@ -10,6 +10,7 @@
*/
#define NV40_GRCTX_SIZE (175*1024)
#define NV43_GRCTX_SIZE (70*1024)
#define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */
#define NV4A_GRCTX_SIZE (64*1024)
#define NV4C_GRCTX_SIZE (25*1024)
#define NV4E_GRCTX_SIZE (25*1024)
@ -274,6 +275,156 @@ nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
INSTANCE_WR(ctx, i/4, 0x3f800000);
};
static void nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x0004c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00138/4, 0x20010001);
INSTANCE_WR(ctx, 0x0013c/4, 0x0f73ef00);
INSTANCE_WR(ctx, 0x00144/4, 0x02008821);
INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
INSTANCE_WR(ctx, 0x00178/4, 0x00000001);
INSTANCE_WR(ctx, 0x0017c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00180/4, 0x00000001);
INSTANCE_WR(ctx, 0x00184/4, 0x00000001);
INSTANCE_WR(ctx, 0x00188/4, 0x00000001);
INSTANCE_WR(ctx, 0x0018c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00190/4, 0x00000001);
INSTANCE_WR(ctx, 0x00194/4, 0x00000040);
INSTANCE_WR(ctx, 0x00198/4, 0x00000040);
INSTANCE_WR(ctx, 0x0019c/4, 0x00000040);
INSTANCE_WR(ctx, 0x001a4/4, 0x00000040);
INSTANCE_WR(ctx, 0x001ec/4, 0x0b0b0b0c);
INSTANCE_WR(ctx, 0x0035c/4, 0x00040000);
INSTANCE_WR(ctx, 0x0036c/4, 0x55555555);
INSTANCE_WR(ctx, 0x00370/4, 0x55555555);
INSTANCE_WR(ctx, 0x00374/4, 0x55555555);
INSTANCE_WR(ctx, 0x00378/4, 0x55555555);
INSTANCE_WR(ctx, 0x003a4/4, 0x00000008);
INSTANCE_WR(ctx, 0x003b8/4, 0x00003010);
INSTANCE_WR(ctx, 0x003dc/4, 0x00000111);
INSTANCE_WR(ctx, 0x003e0/4, 0x00000111);
INSTANCE_WR(ctx, 0x003e4/4, 0x00000111);
INSTANCE_WR(ctx, 0x003e8/4, 0x00000111);
INSTANCE_WR(ctx, 0x003ec/4, 0x00000111);
INSTANCE_WR(ctx, 0x003f0/4, 0x00000111);
INSTANCE_WR(ctx, 0x003f4/4, 0x00000111);
INSTANCE_WR(ctx, 0x003f8/4, 0x00000111);
INSTANCE_WR(ctx, 0x003fc/4, 0x00000111);
INSTANCE_WR(ctx, 0x00400/4, 0x00000111);
INSTANCE_WR(ctx, 0x00404/4, 0x00000111);
INSTANCE_WR(ctx, 0x00408/4, 0x00000111);
INSTANCE_WR(ctx, 0x0040c/4, 0x00000111);
INSTANCE_WR(ctx, 0x00410/4, 0x00000111);
INSTANCE_WR(ctx, 0x00414/4, 0x00000111);
INSTANCE_WR(ctx, 0x00418/4, 0x00000111);
INSTANCE_WR(ctx, 0x004b0/4, 0x00000111);
INSTANCE_WR(ctx, 0x004b4/4, 0x00080060);
INSTANCE_WR(ctx, 0x004d0/4, 0x00000080);
INSTANCE_WR(ctx, 0x004d4/4, 0xffff0000);
INSTANCE_WR(ctx, 0x004d8/4, 0x00000001);
INSTANCE_WR(ctx, 0x004ec/4, 0x46400000);
INSTANCE_WR(ctx, 0x004fc/4, 0xffff0000);
INSTANCE_WR(ctx, 0x00500/4, 0x88888888);
INSTANCE_WR(ctx, 0x00504/4, 0x88888888);
INSTANCE_WR(ctx, 0x00508/4, 0x88888888);
INSTANCE_WR(ctx, 0x0050c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00510/4, 0x88888888);
INSTANCE_WR(ctx, 0x00514/4, 0x88888888);
INSTANCE_WR(ctx, 0x00518/4, 0x88888888);
INSTANCE_WR(ctx, 0x0051c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00520/4, 0x88888888);
INSTANCE_WR(ctx, 0x00524/4, 0x88888888);
INSTANCE_WR(ctx, 0x00528/4, 0x88888888);
INSTANCE_WR(ctx, 0x0052c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00530/4, 0x88888888);
INSTANCE_WR(ctx, 0x00534/4, 0x88888888);
INSTANCE_WR(ctx, 0x00538/4, 0x88888888);
INSTANCE_WR(ctx, 0x0053c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00550/4, 0x0fff0000);
INSTANCE_WR(ctx, 0x00554/4, 0x0fff0000);
INSTANCE_WR(ctx, 0x0055c/4, 0x00011100);
for (i=0x00578; i<0x005b4; i+=4)
INSTANCE_WR(ctx, i/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005c0/4, 0x4b7fffff);
INSTANCE_WR(ctx, 0x005e8/4, 0x30201000);
INSTANCE_WR(ctx, 0x005ec/4, 0x70605040);
INSTANCE_WR(ctx, 0x005f0/4, 0xb8a89888);
INSTANCE_WR(ctx, 0x005f4/4, 0xf8e8d8c8);
INSTANCE_WR(ctx, 0x00608/4, 0x40100000);
INSTANCE_WR(ctx, 0x00624/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00658/4, 0x435185d6);
INSTANCE_WR(ctx, 0x0065c/4, 0x2155b699);
INSTANCE_WR(ctx, 0x00660/4, 0xfedcba98);
INSTANCE_WR(ctx, 0x00664/4, 0x00000098);
INSTANCE_WR(ctx, 0x00674/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00678/4, 0x00ff7000);
INSTANCE_WR(ctx, 0x0067c/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x0068c/4, 0x00ff0000);
INSTANCE_WR(ctx, 0x006c8/4, 0x00ffff00);
for (i=0x0070c; i<=0x00748; i+=4)
INSTANCE_WR(ctx, i/4, 0x00018488);
for (i=0x0074c; i<=0x00788; i+=4)
INSTANCE_WR(ctx, i/4, 0x00028202);
for (i=0x007cc; i<=0x00808; i+=4)
INSTANCE_WR(ctx, i/4, 0x0000aae4);
for (i=0x0080c; i<=0x00848; i+=4)
INSTANCE_WR(ctx, i/4, 0x01012000);
for (i=0x0084c; i<=0x00888; i+=4)
INSTANCE_WR(ctx, i/4, 0x00080008);
for (i=0x008cc; i<=0x00908; i+=4)
INSTANCE_WR(ctx, i/4, 0x00100008);
for (i=0x0095c; i<=0x00968; i+=4)
INSTANCE_WR(ctx, i/4, 0x0001bc80);
for (i=0x0096c; i<=0x00978; i+=4)
INSTANCE_WR(ctx, i/4, 0x00000202);
for (i=0x0098c; i<=0x00998; i+=4)
INSTANCE_WR(ctx, i/4, 0x00000008);
for (i=0x009ac; i<=0x009b8; i+=4)
INSTANCE_WR(ctx, i/4, 0x00080008);
INSTANCE_WR(ctx, 0x009cc/4, 0x00000002);
INSTANCE_WR(ctx, 0x00a00/4, 0x00000421);
INSTANCE_WR(ctx, 0x00a04/4, 0x030c30c3);
INSTANCE_WR(ctx, 0x00a08/4, 0x00011001);
INSTANCE_WR(ctx, 0x00a14/4, 0x3e020200);
INSTANCE_WR(ctx, 0x00a18/4, 0x00ffffff);
INSTANCE_WR(ctx, 0x00a1c/4, 0x0c103f00);
INSTANCE_WR(ctx, 0x00a28/4, 0x00040000);
INSTANCE_WR(ctx, 0x00a60/4, 0x00008100);
INSTANCE_WR(ctx, 0x00aec/4, 0x00000001);
INSTANCE_WR(ctx, 0x00b30/4, 0x00001001);
INSTANCE_WR(ctx, 0x00b38/4, 0x00000003);
INSTANCE_WR(ctx, 0x00b3c/4, 0x00888001);
INSTANCE_WR(ctx, 0x00bc0/4, 0x00000005);
INSTANCE_WR(ctx, 0x00bcc/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00be8/4, 0x00005555);
INSTANCE_WR(ctx, 0x00bec/4, 0x00005555);
INSTANCE_WR(ctx, 0x00bf0/4, 0x00005555);
INSTANCE_WR(ctx, 0x00bf4/4, 0x00000001);
INSTANCE_WR(ctx, 0x00c2c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00c30/4, 0x08e00001);
INSTANCE_WR(ctx, 0x00c34/4, 0x000e3000);
for (i=0x017f8; i<=0x01870; i+=8)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for (i=0x035b8; i<=0x057a8; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for (i=0x057b8; i<=0x05ba8; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for (i=0x07f38; i<=0x0a128; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for (i=0x0a138; i<=0x0a528; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for (i=0x0c8b8; i<=0x0eaa8; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for (i=0x0eab8; i<=0x0eea8; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
}
static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
@ -622,6 +773,10 @@ nv40_graph_context_create(drm_device_t *dev, int channel)
ctx_size = NV43_GRCTX_SIZE;
ctx_init = nv43_graph_context_init;
break;
case 0x46:
ctx_size = NV46_GRCTX_SIZE;
ctx_init = nv46_graph_context_init;
break;
case 0x4a:
ctx_size = NV4A_GRCTX_SIZE;
ctx_init = nv4a_graph_context_init;
@ -821,6 +976,37 @@ static uint32_t nv43_ctx_voodoo[] = {
~0
};
static uint32_t nv46_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
0x004020e6, 0x007000a0, 0x00500060, 0x00200008, 0x0060000a, 0x0011814d,
0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
0x00500060, 0x00403f87, 0x0060000d, 0x004079e6, 0x002000f7, 0x0060000a,
0x00200045, 0x00100620, 0x00104668, 0x0017466d, 0x0011068b, 0x00168691,
0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x00200022,
0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1,
0x00500060, 0x0020027f, 0x0060000a, 0x00104800, 0x00108901, 0x00104910,
0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, 0x00108a14,
0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, 0x00104d80,
0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x00105406, 0x00105709,
0x00200316, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084,
0x00800001, 0x0020055e, 0x0060000a, 0x002037e0, 0x0040788a, 0x00201320,
0x00800029, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x00200081,
0x0060000a, 0x00104280, 0x00200316, 0x0060000a, 0x00200004, 0x00800001,
0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
static uint32_t nv4a_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06,
@ -891,7 +1077,7 @@ static uint32_t nv4e_ctx_voodoo[] = {
* G70 0x47
* G71 0x49
* NV45 0x48
* G72 0x46
* G72[M] 0x46
* G73 0x4b
* C51_G7X 0x4c
* C51 0x4e
@ -913,6 +1099,7 @@ nv40_graph_init(drm_device_t *dev)
switch (dev_priv->chipset) {
case 0x40: ctx_voodoo = nv40_ctx_voodoo; break;
case 0x43: ctx_voodoo = nv43_ctx_voodoo; break;
case 0x46: ctx_voodoo = nv46_ctx_voodoo; break;
case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break;
case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break;
default:
@ -947,7 +1134,7 @@ nv40_graph_init(drm_device_t *dev)
NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);

View File

@ -9,6 +9,11 @@ nv40_mc_init(drm_device_t *dev)
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t tmp;
/* Power up everything, resetting each individual unit will
* be done later if needed.
*/
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
NV_WRITE(NV03_PMC_INTR_EN_0, 0);
switch (dev_priv->chipset) {

View File

@ -456,8 +456,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
(dev_priv->span_offset >> 5));
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
@ -563,7 +562,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
dev_priv->gart_info.is_pcie = 0;
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
dev->dev_private = (void *)dev_priv;

View File

@ -148,15 +148,15 @@ void r300_init_reg_flags(void)
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
ADD_RANGE(0x2080, 1);
ADD_RANGE(R300_VAP_CNTL, 1);
ADD_RANGE(R300_SE_VTE_CNTL, 2);
ADD_RANGE(0x2134, 2);
ADD_RANGE(0x2140, 1);
ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
ADD_RANGE(0x21DC, 1);
ADD_RANGE(0x221C, 1);
ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
ADD_RANGE(0x2220, 4);
ADD_RANGE(0x2288, 1);
ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
@ -168,13 +168,13 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_RE_POINTSIZE, 1);
ADD_RANGE(0x4230, 3);
ADD_RANGE(R300_RE_LINE_CNT, 1);
ADD_RANGE(0x4238, 1);
ADD_RANGE(R300_RE_UNK4238, 1);
ADD_RANGE(0x4260, 3);
ADD_RANGE(0x4274, 4);
ADD_RANGE(0x4288, 5);
ADD_RANGE(0x42A0, 1);
ADD_RANGE(R300_RE_SHADE, 4);
ADD_RANGE(R300_RE_POLYGON_MODE, 5);
ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
ADD_RANGE(0x42B4, 1);
ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
ADD_RANGE(R300_RE_CULL_CNTL, 1);
ADD_RANGE(0x42C0, 2);
ADD_RANGE(R300_RS_CNTL_0, 2);
@ -190,22 +190,22 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_PFS_INSTR1_0, 64);
ADD_RANGE(R300_PFS_INSTR2_0, 64);
ADD_RANGE(R300_PFS_INSTR3_0, 64);
ADD_RANGE(0x4BC0, 1);
ADD_RANGE(0x4BC8, 3);
ADD_RANGE(R300_RE_FOG_STATE, 1);
ADD_RANGE(R300_FOG_COLOR_R, 3);
ADD_RANGE(R300_PP_ALPHA_TEST, 2);
ADD_RANGE(0x4BD8, 1);
ADD_RANGE(R300_PFS_PARAM_0_X, 64);
ADD_RANGE(0x4E00, 1);
ADD_RANGE(R300_RB3D_CBLEND, 2);
ADD_RANGE(R300_RB3D_COLORMASK, 1);
ADD_RANGE(0x4E10, 3);
ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
ADD_RANGE(0x4F10, 4);
ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
ADD_RANGE(0x4F28, 1);
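
These r300_cmdbuf.c hunks only swap raw offsets for the named registers from r300_reg.h; the mechanism is unchanged. ADD_RANGE() marks a run of register offsets as client-writable (ADD_RANGE_MARK additionally tags them for offset checking) in a table that the command-stream verifier consults. A schematic of that kind of white-list, assuming one flag byte per 32-bit register (not the exact in-tree table):

static unsigned char reg_flags[0x10000 >> 2];   /* one entry per dword register */

#define MARK_SAFE 1

#define ADD_RANGE(reg, count)                                     \
        do {                                                      \
                int _i;                                           \
                for (_i = 0; _i < (count); _i++)                  \
                        reg_flags[((reg) >> 2) + _i] = MARK_SAFE; \
        } while (0)

/* The verifier then refuses writes to anything left unmarked: */
static int r300_reg_writable(int reg)
{
        return reg_flags[reg >> 2] != 0;
}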

View File

@ -23,6 +23,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/* *INDENT-OFF* */
#ifndef _R300_REG_H
#define _R300_REG_H
@ -145,6 +147,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_VC_NO_SWAP (0 << 0)
# define R300_VC_16BIT_SWAP (1 << 0)
# define R300_VC_32BIT_SWAP (2 << 0)
# define R300_VAP_TCL_BYPASS (1 << 8)
/* gap */
@ -325,7 +328,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* Most likely this is used to ignore rest of the program in cases
* where group of verts arent visible. For some reason this "section"
* is sometimes accepted other instruction that have no relationship with
*position calculations.
*/
#define R300_VAP_PVS_CNTL_1 0x22D0
# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
@ -487,6 +490,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_GB_W_SELECT_1 (1<<4)
#define R300_GB_AA_CONFIG 0x4020
# define R300_AA_DISABLE 0x00
# define R300_AA_ENABLE 0x01
# define R300_AA_SUBSAMPLES_2 0
# define R300_AA_SUBSAMPLES_3 (1<<1)
@ -497,6 +501,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Zero to flush caches. */
#define R300_TX_CNTL 0x4100
#define R300_TX_FLUSH 0x0
/* The upper enable bits are guessed, based on fglrx reported limits. */
#define R300_TX_ENABLE 0x4104
@ -565,12 +570,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define R300_RE_FOG_SCALE 0x4294
#define R300_RE_FOG_START 0x4298
/* Not sure why there are duplicate of factor and constant values.
* My best guess so far is that there are seperate zbiases for test and write.
* Ordering might be wrong.
* Some of the tests indicate that fgl has a fallback implementation of zbias
* via pixel shaders.
*/
#define R300_RE_ZBIAS_CNTL 0x42A0 /* GUESS */
#define R300_RE_ZBIAS_T_FACTOR 0x42A4
#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
#define R300_RE_ZBIAS_W_FACTOR 0x42AC
@ -667,6 +673,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Special handling for color: When the fragment program uses color,
* the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
* color register index.
*
* Apperently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
* R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
* See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
* correct or not. - Oliver.
*/
# define R300_RS_ROUTE_0_COLOR (1 << 14)
# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
@ -907,7 +918,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* 32 bit chroma key */
#define R300_TX_CHROMA_KEY_0 0x4580
/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
#define R300_TX_BORDER_COLOR_0 0x45C0
/* END: Texture specification */
@ -997,6 +1008,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_FPITX_OP_KIL 2
# define R300_FPITX_OP_TXP 3
# define R300_FPITX_OP_TXB 4
# define R300_FPITX_OPCODE_MASK (7 << 15)
/* ALU
* The ALU instructions register blocks are enumerated according to the order
@ -1045,7 +1057,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* WRT swizzling. If, for example, you want to load an R component into an
* Alpha operand, this R component is taken from a *color* source, not from
* an alpha source. The corresponding register doesn't even have to appear in
* the alpha sources list. (I hope this alll makes sense to you)
* the alpha sources list. (I hope this all makes sense to you)
*
* Destination selection
* The destination register index is in FPI1 (color) and FPI3 (alpha)
@ -1072,6 +1084,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_FPI1_SRC2C_SHIFT 12
# define R300_FPI1_SRC2C_MASK (31 << 12)
# define R300_FPI1_SRC2C_CONST (1 << 17)
# define R300_FPI1_SRC_MASK 0x0003ffff
# define R300_FPI1_DSTC_SHIFT 18
# define R300_FPI1_DSTC_MASK (31 << 18)
# define R300_FPI1_DSTC_REG_MASK_SHIFT 23
@ -1093,6 +1106,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_FPI3_SRC2A_SHIFT 12
# define R300_FPI3_SRC2A_MASK (31 << 12)
# define R300_FPI3_SRC2A_CONST (1 << 17)
# define R300_FPI3_SRC_MASK 0x0003ffff
# define R300_FPI3_DSTA_SHIFT 18
# define R300_FPI3_DSTA_MASK (31 << 18)
# define R300_FPI3_DSTA_REG (1 << 23)
@ -1548,6 +1562,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6)
#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6)
#define R300_PRIM_NUM_VERTICES_SHIFT 16
#define R300_PRIM_NUM_VERTICES_MASK 0xffff
/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
* Two parameter dwords:
@ -1596,5 +1611,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00
#endif /* _R300_REG_H */
/* *INDENT-ON* */

View File

@ -824,12 +824,21 @@ static int RADEON_READ_PLL(drm_device_t * dev, int addr)
return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
}
static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
{
RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
return RADEON_READ(RADEON_PCIE_DATA);
}
static u32 RADEON_READ_IGPGART(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
RADEON_WRITE(RADEON_IGPGART_INDEX, addr & 0x7f);
ret = RADEON_READ(RADEON_IGPGART_DATA);
RADEON_WRITE(RADEON_IGPGART_INDEX, 0x7f);
return ret;
}
#if RADEON_FIFO_DEBUG
static void radeon_status(drm_radeon_private_t * dev_priv)
{
@ -1266,7 +1275,45 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
}
}
/* Enable or disable PCI-E GART on the chip */
/* Enable or disable IGP GART on the chip */
static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
{
u32 temp, tmp;
tmp = RADEON_READ(RADEON_AIC_CNTL);
DRM_DEBUG("setting igpgart AIC CNTL is %08X\n", tmp);
if (on) {
DRM_DEBUG("programming igpgart %08X %08lX %08X\n",
dev_priv->gart_vm_start,
(long)dev_priv->gart_info.bus_addr,
dev_priv->gart_size);
RADEON_WRITE_IGPGART(RADEON_IGPGART_UNK_18, 0x1000);
RADEON_WRITE_IGPGART(RADEON_IGPGART_ENABLE, 0x1);
RADEON_WRITE_IGPGART(RADEON_IGPGART_CTRL, 0x42040800);
RADEON_WRITE_IGPGART(RADEON_IGPGART_BASE_ADDR,
dev_priv->gart_info.bus_addr);
temp = RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_UNK_39);
RADEON_WRITE_IGPGART(RADEON_IGPGART_UNK_39, temp);
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
dev_priv->gart_size = 32*1024*1024;
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
temp = RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_ENABLE);
RADEON_WRITE_IGPGART(RADEON_IGPGART_ENABLE, temp);
RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x1);
RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x0);
}
}
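(Aside, not part of the diff: the RADEON_MC_AGP_LOCATION value written above packs the aperture end address into the high 16 bits and the start address into the low 16 bits. A worked example with an assumed gart_vm_start of 0xE0000000 and the 32 MB size forced a few lines earlier:)

#include <stdint.h>

/* Illustrative only: reproduce the MC_AGP_LOCATION packing above with
 * assumed example values. */
static uint32_t igp_mc_agp_location_example(void)
{
	uint32_t gart_vm_start = 0xE0000000;	/* assumed placement */
	uint32_t gart_size = 32 * 1024 * 1024;	/* 0x02000000, as forced above */

	return ((gart_vm_start - 1 + gart_size) & 0xffff0000) |
	       (gart_vm_start >> 16);		/* == 0xE1FFE000 */
}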
static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
@ -1301,6 +1348,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp;
if (dev_priv->flags & RADEON_IS_IGPGART) {
radeon_set_igpgart(dev_priv, on);
return;
}
if (dev_priv->flags & RADEON_IS_PCIE) {
radeon_set_pciegart(dev_priv, on);
return;
@ -1339,8 +1391,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
DRM_DEBUG("\n");
/* if we require new memory map but we don't have it fail */
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap)
{
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
@ -1372,6 +1423,10 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
return DRM_ERR(EINVAL);
}
/* Enable vblank on CRTC1 for older X servers
*/
dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
switch(init->func) {
case RADEON_INIT_R200_CP:
dev_priv->microcode_version = UCODE_R200;
@ -1453,13 +1508,13 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
RADEON_ROUND_MODE_TRUNC |
RADEON_ROUND_PREC_8TH_PIX);
DRM_GETSAREA();
dev_priv->ring_offset = init->ring_offset;
dev_priv->ring_rptr_offset = init->ring_rptr_offset;
dev_priv->buffers_offset = init->buffers_offset;
dev_priv->gart_textures_offset = init->gart_textures_offset;
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
radeon_do_cleanup_cp(dev);
@ -1635,8 +1690,10 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->gart_info.addr =
dev_priv->gart_info.mapping.handle;
dev_priv->gart_info.is_pcie =
!!(dev_priv->flags & RADEON_IS_PCIE);
if (dev_priv->flags & RADEON_IS_PCIE)
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
else
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_FB;
@ -1644,6 +1701,10 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->gart_info.addr,
dev_priv->pcigart_offset);
} else {
if (dev_priv->flags & RADEON_IS_IGPGART)
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
else
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_MAIN;
dev_priv->gart_info.addr = NULL;

@ -664,6 +664,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
typedef struct drm_radeon_getparam {
int param;
@ -718,7 +719,7 @@ typedef struct drm_radeon_setparam {
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
/* 1.14: Clients can allocate/free a surface
*/
typedef struct drm_radeon_surface_alloc {
@ -731,5 +732,7 @@ typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
#endif
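(Aside, not part of the diff: a rough user-space sketch of how a client could use the new VBLANK CRTC parameter. It assumes the libdrm drmCommandWriteRead()/drmCommandWrite() helpers and the DRM_RADEON_GETPARAM/DRM_RADEON_SETPARAM command indices defined elsewhere in this header; error handling is trimmed.)

#include <xf86drm.h>
#include "radeon_drm.h"

static int radeon_enable_crtc2_vblank(int fd)
{
	drm_radeon_getparam_t gp;
	drm_radeon_setparam_t sp;
	int crtcs = 0;

	/* Ask the driver which CRTCs currently generate vblank interrupts. */
	gp.param = RADEON_PARAM_VBLANK_CRTC;
	gp.value = &crtcs;
	if (drmCommandWriteRead(fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp)))
		return -1;

	/* Request vblank interrupts on CRTC2 as well. */
	sp.param = RADEON_SETPARAM_VBLANK_CRTC;
	sp.value = crtcs | DRM_RADEON_VBLANK_CRTC2;
	return drmCommandWrite(fd, DRM_RADEON_SETPARAM, &sp, sizeof(sp));
}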

@ -96,10 +96,12 @@
* 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
* new packet type)
* 1.26- Add support for variable size PCI(E) gart aperture
* 1.27- Add support for IGP GART
* 1.28- Add support for VBL on CRTC2
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 26
#define DRIVER_MINOR 28
#define DRIVER_PATCHLEVEL 0
#if defined(__linux__)
@ -150,6 +152,7 @@ enum radeon_chip_flags {
RADEON_IS_PCIE = 0x00200000UL,
RADEON_NEW_MEMMAP = 0x00400000UL,
RADEON_IS_PCI = 0x00800000UL,
RADEON_IS_IGPGART = 0x01000000UL,
};
#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
@ -248,7 +251,6 @@ typedef struct drm_radeon_private {
int do_boxes;
int page_flipping;
int current_page;
u32 color_fmt;
unsigned int front_offset;
@ -284,6 +286,10 @@ typedef struct drm_radeon_private {
wait_queue_head_t irq_queue;
int counter;
int vblank_crtc;
uint32_t irq_enable_reg;
int irq_enabled;
struct radeon_surface surfaces[RADEON_MAX_SURFACES];
struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES];
@ -360,10 +366,14 @@ extern int radeon_emit_irq(drm_device_t * dev);
extern void radeon_do_release(drm_device_t * dev);
extern int radeon_driver_vblank_wait(drm_device_t * dev,
unsigned int *sequence);
extern int radeon_driver_vblank_wait2(drm_device_t * dev,
unsigned int *sequence);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
extern void radeon_driver_irq_preinstall(drm_device_t * dev);
extern void radeon_driver_irq_postinstall(drm_device_t * dev);
extern void radeon_driver_irq_uninstall(drm_device_t * dev);
extern int radeon_vblank_crtc_get(drm_device_t *dev);
extern int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value);
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
extern int radeon_driver_unload(struct drm_device *dev);
@ -462,6 +472,16 @@ extern int radeon_move(drm_buffer_object_t * bo,
#define RADEON_PCIE_TX_GART_END_LO 0x16
#define RADEON_PCIE_TX_GART_END_HI 0x17
#define RADEON_IGPGART_INDEX 0x168
#define RADEON_IGPGART_DATA 0x16c
#define RADEON_IGPGART_UNK_18 0x18
#define RADEON_IGPGART_CTRL 0x2b
#define RADEON_IGPGART_BASE_ADDR 0x2c
#define RADEON_IGPGART_FLUSH 0x2e
#define RADEON_IGPGART_ENABLE 0x38
#define RADEON_IGPGART_UNK_39 0x39
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
@ -514,12 +534,15 @@ extern int radeon_move(drm_buffer_object_t * bo,
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
@ -1024,6 +1047,14 @@ do { \
RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) ); \
} while (0)
#define RADEON_WRITE_IGPGART( addr, val ) \
do { \
RADEON_WRITE( RADEON_IGPGART_INDEX, \
((addr) & 0x7f) | (1 << 8)); \
RADEON_WRITE( RADEON_IGPGART_DATA, (val) ); \
RADEON_WRITE( RADEON_IGPGART_INDEX, 0x7f ); \
} while (0)
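(Aside, not part of the diff: the IGP GART registers sit behind the index/data window above; a typical driver-side use combines the two helpers as sketched below. The readings of bit 8 as a write strobe and of 0x7f as a "park" value are inferred from this code, not from documentation.)

	/* Driver-side fragment (not standalone): read-modify-write of one of
	 * the still-undocumented IGP GART registers via the index/data pair. */
	u32 tmp = RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_UNK_39);
	RADEON_WRITE_IGPGART(RADEON_IGPGART_UNK_39, tmp);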
#define RADEON_WRITE_PCIE( addr, val ) \
do { \
RADEON_WRITE8( RADEON_PCIE_INDEX, \

@ -72,8 +72,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
RADEON_CRTC_VBLANK_STAT));
stat = radeon_acknowledge_irqs(dev_priv, dev_priv->irq_enable_reg);
if (!stat)
return IRQ_NONE;
@ -86,8 +85,22 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
}
/* VBLANK interrupt */
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
int vblank_crtc = dev_priv->vblank_crtc;
if ((vblank_crtc &
(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
if (stat & RADEON_CRTC_VBLANK_STAT)
atomic_inc(&dev->vbl_received);
if (stat & RADEON_CRTC2_VBLANK_STAT)
atomic_inc(&dev->vbl_received2);
} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
(vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
((stat & RADEON_CRTC2_VBLANK_STAT) &&
(vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
}
@ -129,19 +142,30 @@ static int radeon_wait_irq(drm_device_t * dev, int irq_nr)
return ret;
}
int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence,
int crtc)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
int ack = 0;
atomic_t *counter;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
radeon_acknowledge_irqs(dev_priv, RADEON_CRTC_VBLANK_STAT);
if (crtc == DRM_RADEON_VBLANK_CRTC1) {
counter = &dev->vbl_received;
ack |= RADEON_CRTC_VBLANK_STAT;
} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
counter = &dev->vbl_received2;
ack |= RADEON_CRTC2_VBLANK_STAT;
} else
return DRM_ERR(EINVAL);
radeon_acknowledge_irqs(dev_priv, ack);
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
@ -150,7 +174,7 @@ int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
@ -158,6 +182,16 @@ int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
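(Aside, not part of the diff: the (1 << 23) comparison above leans on unsigned wrap-around, so the wait completes once the counter has reached or slightly passed the requested sequence, while a counter that is still behind yields a huge unsigned difference and keeps waiting. The same predicate in isolation:)

/* Illustrative only: mirrors the DRM_WAIT_ON condition used above. */
static int vblank_sequence_reached(unsigned int cur, unsigned int wanted)
{
	/* true when cur is at or up to 2^23 counts past wanted */
	return (cur - wanted) <= (1U << 23);
}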
int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
{
return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
}
int radeon_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
{
return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
}
/* Needs the lock as it touches the ring.
*/
int radeon_irq_emit(DRM_IOCTL_ARGS)
@ -206,6 +240,21 @@ int radeon_irq_wait(DRM_IOCTL_ARGS)
return radeon_wait_irq(dev, irqwait.irq_seq);
}
static void radeon_enable_interrupt(drm_device_t *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
dev_priv->irq_enabled = 1;
}
/* drm_dma.h hooks
*/
void radeon_driver_irq_preinstall(drm_device_t * dev)
@ -229,9 +278,7 @@ void radeon_driver_irq_postinstall(drm_device_t * dev)
dev_priv->counter = 0;
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
/* Turn on SW and VBL ints */
RADEON_WRITE(RADEON_GEN_INT_CNTL,
RADEON_CRTC_VBLANK_MASK | RADEON_SW_INT_ENABLE);
radeon_enable_interrupt(dev);
}
void radeon_driver_irq_uninstall(drm_device_t * dev)
@ -241,6 +288,38 @@ void radeon_driver_irq_uninstall(drm_device_t * dev)
if (!dev_priv)
return;
dev_priv->irq_enabled = 0;
/* Disable *all* interrupts */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}
int radeon_vblank_crtc_get(drm_device_t *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
u32 flag;
u32 value;
flag = RADEON_READ(RADEON_GEN_INT_CNTL);
value = 0;
if (flag & RADEON_CRTC_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC1;
if (flag & RADEON_CRTC2_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC2;
return value;
}
int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
return DRM_ERR(EINVAL);
}
dev_priv->vblank_crtc = (unsigned int)value;
radeon_enable_interrupt(dev);
return 0;
}

@ -773,7 +773,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv,
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
if (dev_priv->page_flipping && dev_priv->current_page == 1) {
if (dev_priv->sarea_priv->pfCurrentPage == 1) {
OUT_RING(dev_priv->front_pitch_offset);
} else {
OUT_RING(dev_priv->back_pitch_offset);
@ -861,7 +861,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
dev_priv->stats.clears++;
if (dev_priv->page_flipping && dev_priv->current_page == 1) {
if (dev_priv->sarea_priv->pfCurrentPage == 1) {
unsigned int tmp = flags;
flags &= ~(RADEON_FRONT | RADEON_BACK);
@ -1382,7 +1382,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)
/* Make this work even if front & back are flipped:
*/
OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
if (dev_priv->current_page == 0) {
if (dev_priv->sarea_priv->pfCurrentPage == 0) {
OUT_RING(dev_priv->back_pitch_offset);
OUT_RING(dev_priv->front_pitch_offset);
} else {
@ -1416,12 +1416,12 @@ static void radeon_cp_dispatch_flip(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_sarea_t *sarea = (drm_sarea_t *) dev_priv->sarea->handle;
int offset = (dev_priv->current_page == 1)
int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
DRM_DEBUG("%s: pfCurrentPage=%d\n",
__FUNCTION__,
dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
dev_priv->sarea_priv->pfCurrentPage);
/* Do some trivial performance monitoring...
*/
@ -1449,8 +1449,8 @@ static void radeon_cp_dispatch_flip(drm_device_t * dev)
* performing the swapbuffer ioctl.
*/
dev_priv->sarea_priv->last_frame++;
dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
1 - dev_priv->current_page;
dev_priv->sarea_priv->pfCurrentPage =
1 - dev_priv->sarea_priv->pfCurrentPage;
BEGIN_RING(2);
@ -2162,24 +2162,10 @@ static int radeon_do_init_pageflip(drm_device_t * dev)
ADVANCE_RING();
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
return 0;
}
if (dev_priv->sarea_priv->pfCurrentPage != 1)
dev_priv->sarea_priv->pfCurrentPage = 0;
/* Called whenever a client dies, from drm_release.
* NOTE: Lock isn't necessarily held when this is called!
*/
static int radeon_do_cleanup_pageflip(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
if (dev_priv->current_page != 0)
radeon_cp_dispatch_flip(dev);
dev_priv->page_flipping = 0;
return 0;
}
@ -3145,6 +3131,9 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
else
value = RADEON_CARD_PCI;
break;
case RADEON_PARAM_VBLANK_CRTC:
value = radeon_vblank_crtc_get(dev);
break;
default:
DRM_DEBUG( "Invalid parameter %d\n", param.param );
return DRM_ERR(EINVAL);
@ -3206,6 +3195,9 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
break;
case RADEON_SETPARAM_VBLANK_CRTC:
return radeon_vblank_crtc_set(dev, sp.value);
break;
default:
DRM_DEBUG("Invalid parameter %d\n", sp.param);
return DRM_ERR(EINVAL);
@ -3225,9 +3217,7 @@ void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
if (dev_priv->page_flipping) {
radeon_do_cleanup_pageflip(dev);
}
dev_priv->page_flipping = 0;
radeon_mem_release(filp, dev_priv->gart_heap);
radeon_mem_release(filp, dev_priv->fb_heap);
radeon_surfaces_release(filp, dev_priv);
@ -3236,6 +3226,14 @@ void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
void radeon_driver_lastclose(drm_device_t * dev)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
if (dev_priv->sarea_priv &&
dev_priv->sarea_priv->pfCurrentPage != 0)
radeon_cp_dispatch_flip(dev);
}
radeon_do_release(dev);
}

@ -710,7 +710,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
dev_priv->texture_offset = init->texture_offset;
dev_priv->texture_size = init->texture_size;
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
savage_do_cleanup_bci(dev);

@ -84,9 +84,9 @@ static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
return ((hw_addr <= dev_priv->dma_low) ?
(dev_priv->dma_low - hw_addr) :
return ((hw_addr <= dev_priv->dma_low) ?
(dev_priv->dma_low - hw_addr) :
(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}
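(Aside, not part of the diff: via_cmdbuf_lag() above reports how many bytes of queued commands the engine still has to fetch, adding dma_wrap when the driver's write position has wrapped past the hardware read pointer. A worked example with made-up numbers:)

#include <stdint.h>

/* Illustrative only: the same lag computation for the wrapped case. */
static uint32_t via_cmdbuf_lag_example(void)
{
	uint32_t dma_wrap = 0x200000, dma_low = 0x1000, hw_addr = 0x1f0000;

	/* write pointer wrapped, read pointer still on the previous lap */
	return (hw_addr <= dma_low) ? (dma_low - hw_addr)
				    : (dma_wrap + dma_low - hw_addr);	/* 0x11000 bytes */
}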
@ -103,7 +103,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
uint32_t count;
hw_addr_ptr = dev_priv->hw_addr_ptr;
cur_addr = dev_priv->dma_low;
next_addr = cur_addr + size + 512*1024;
next_addr = cur_addr + size + 512 * 1024;
count = 1000000;
do {
hw_addr = *hw_addr_ptr - agp_base;
@ -207,8 +207,8 @@ static int via_initialize(drm_device_t * dev,
dev_priv->dma_offset = init->offset;
dev_priv->last_pause_ptr = NULL;
dev_priv->hw_addr_ptr =
(volatile uint32_t *)((char *)dev_priv->mmio->handle +
init->reg_pause_addr);
(volatile uint32_t *)((char *)dev_priv->mmio->handle +
init->reg_pause_addr);
via_cmdbuf_start(dev_priv);
@ -239,8 +239,8 @@ static int via_dma_init(DRM_IOCTL_ARGS)
retcode = via_dma_cleanup(dev);
break;
case VIA_DMA_INITIALIZED:
retcode = (dev_priv->ring.virtual_start != NULL) ?
0: DRM_ERR( EFAULT );
retcode = (dev_priv->ring.virtual_start != NULL) ?
0 : DRM_ERR(EFAULT);
break;
default:
retcode = DRM_ERR(EINVAL);
@ -268,8 +268,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
if (cmd->size > VIA_PCI_BUF_SIZE) {
return DRM_ERR(ENOMEM);
}
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
@ -292,7 +291,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
}
memcpy(vb, dev_priv->pci_buf, cmd->size);
dev_priv->dma_low += cmd->size;
/*
@ -301,7 +300,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
*/
if (cmd->size < 0x100)
via_pad_cache(dev_priv,(0x100 - cmd->size) >> 3);
via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
via_cmdbuf_pause(dev_priv);
return 0;
@ -321,7 +320,7 @@ static int via_flush_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
return via_driver_dma_quiescent(dev);
}
@ -332,7 +331,7 @@ static int via_cmdbuffer(DRM_IOCTL_ARGS)
drm_via_cmdbuffer_t cmdbuf;
int ret;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
sizeof(cmdbuf));
@ -355,16 +354,16 @@ static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
if (cmd->size > VIA_PCI_BUF_SIZE) {
return DRM_ERR(ENOMEM);
}
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
if ((ret =
via_verify_command_stream((uint32_t *)dev_priv->pci_buf,
if ((ret =
via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
cmd->size, dev, 0))) {
return ret;
}
ret =
via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
cmd->size);
@ -377,7 +376,7 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
drm_via_cmdbuffer_t cmdbuf;
int ret;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
sizeof(cmdbuf));
@ -393,7 +392,6 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
return 0;
}
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
uint32_t * vb, int qw_count)
{
@ -403,7 +401,6 @@ static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
return vb;
}
/*
* This function is used internally by ring buffer management code.
*
@ -419,8 +416,7 @@ static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
* modifying the pause address stored in the buffer itself. If
* the regulator has already paused, restart it.
*/
static int via_hook_segment(drm_via_private_t *dev_priv,
static int via_hook_segment(drm_via_private_t * dev_priv,
uint32_t pause_addr_hi, uint32_t pause_addr_lo,
int no_pci_fire)
{
@ -430,8 +426,10 @@ static int via_hook_segment(drm_via_private_t *dev_priv,
paused = 0;
via_flush_write_combine();
*dev_priv->last_pause_ptr = pause_addr_lo;
(void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
*paused_at = pause_addr_lo;
via_flush_write_combine();
(void) *paused_at;
reader = *(dev_priv->hw_addr_ptr);
ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
@ -477,7 +475,7 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
}
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
uint32_t addr, uint32_t *cmd_addr_hi,
uint32_t addr, uint32_t *cmd_addr_hi,
uint32_t *cmd_addr_lo, int skip_wait)
{
uint32_t agp_base;
@ -506,9 +504,6 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
return vb;
}
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
uint32_t pause_addr_lo, pause_addr_hi;
@ -536,7 +531,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
&pause_addr_hi, & pause_addr_lo, 1) - 1;
via_flush_write_combine();
while(! *dev_priv->last_pause_ptr);
(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, command);

@ -32,7 +32,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;

@ -255,7 +255,6 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
drm_device_t * dev)
{
#ifdef __linux__
struct list_head *list;
drm_map_list_t *r_list;
#endif
drm_local_map_t *map = seq->map_cache;
@ -265,8 +264,7 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
return map;
}
#ifdef __linux__
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;