Merge branch 'master' into cleanup

Conflicts:

	libdrm/xf86drm.c
	linux-core/drm_bo.c
	linux-core/drm_fence.c
Dave Airlie 2007-07-11 11:23:41 +10:00
commit 2c9e05cf4c
92 changed files with 5520 additions and 2454 deletions

@ -69,4 +69,3 @@ drm_pciids.h: ${SHARED}/drm_pciids.txt
${SHAREDFILES}:
ln -sf ${SHARED}/$@ $@

@ -32,8 +32,6 @@
#include "drmP.h"
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
#define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */
#define ATI_PCIGART_TABLE_SIZE 32768
int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
@ -48,7 +46,7 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
/* GART table in system memory */
dev->sg->dmah = drm_pci_alloc(dev, ATI_PCIGART_TABLE_SIZE, 0,
dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size, 0,
0xfffffffful);
if (dev->sg->dmah == NULL) {
DRM_ERROR("cannot allocate PCI GART table!\n");
@ -63,9 +61,9 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
pci_gart = gart_info->addr;
}
pages = DRM_MIN(dev->sg->pages, ATI_MAX_PCIGART_PAGES);
pages = DRM_MIN(dev->sg->pages, gart_info->table_size / sizeof(u32));
bzero(pci_gart, ATI_PCIGART_TABLE_SIZE);
bzero(pci_gart, gart_info->table_size);
KASSERT(PAGE_SIZE >= ATI_PCIGART_PAGE_SIZE, ("page size too small"));
@ -73,10 +71,17 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
page_base = (u32) dev->sg->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
if (gart_info->is_pcie)
*pci_gart = (cpu_to_le32(page_base) >> 8) | 0xc;
else
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
*pci_gart = cpu_to_le32(page_base | 0xc);
break;
case DRM_ATI_GART_PCIE:
*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
break;
default:
*pci_gart = cpu_to_le32(page_base);
break;
}
pci_gart++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
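The replacement of the boolean is_pcie with gart_reg_if reflects that the three GART register interfaces encode table entries differently, as the switch above shows. A condensed sketch of the same encoding (hypothetical helper; DRM_ATI_GART_* constants as added to drmP.h in this commit):

/* Hypothetical helper condensing the switch above: one GART entry for a
 * page at bus address page_base, encoded per register interface. */
static u32 ati_pcigart_entry(int gart_reg_if, u32 page_base)
{
	switch (gart_reg_if) {
	case DRM_ATI_GART_IGP:
		return cpu_to_le32(page_base | 0xc);	    /* full address, 0xc ORed in as above */
	case DRM_ATI_GART_PCIE:
		return cpu_to_le32((page_base >> 8) | 0xc); /* address stored shifted down by 8 */
	default:					    /* DRM_ATI_GART_PCI */
		return cpu_to_le32(page_base);		    /* raw bus address */
	}
}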

@ -47,6 +47,9 @@ typedef struct drm_file drm_file_t;
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/stat.h>
#if __FreeBSD_version >= 700000
#include <sys/priv.h>
#endif
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
@ -230,7 +233,11 @@ enum {
#define PAGE_ALIGN(addr) round_page(addr)
/* DRM_SUSER returns true if the user is superuser */
#if __FreeBSD_version >= 700000
#define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0)
#else
#define DRM_SUSER(p) (suser(p) == 0)
#endif
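FreeBSD 7 replaced suser() with the fine-grained priv(9) interface, so the macro now picks the right check per release. A hypothetical caller (not from this diff) gates privileged operations the same way on both branches:

/* Hypothetical sketch: reject non-privileged callers using the macro
 * above. On FreeBSD >= 7 this resolves to priv_check(p, PRIV_DRIVER);
 * older releases fall back to suser(p). */
static int drm_check_priv(struct thread *p)
{
	return DRM_SUSER(p) ? 0 : EACCES;
}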
#define DRM_AGP_FIND_DEVICE() agp_find_device()
#define DRM_MTRR_WC MDF_WRITECOMBINE
#define jiffies ticks
@ -394,19 +401,6 @@ do { \
} \
} while (0)
#define DRM_GETSAREA() \
do { \
drm_local_map_t *map; \
DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
TAILQ_FOREACH(map, &dev->maplist, link) { \
if (map->type == _DRM_SHM && \
map->flags & _DRM_CONTAINS_LOCK) { \
dev_priv->sarea = map; \
break; \
} \
} \
} while (0)
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
@ -627,12 +621,17 @@ typedef struct drm_vbl_sig {
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
#define DRM_ATI_GART_PCI 1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
typedef struct ati_pcigart_info {
int gart_table_location;
int is_pcie;
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
int table_size;
} drm_ati_pcigart_info;
struct drm_driver_info {
@ -822,6 +821,7 @@ dev_type_read(drm_read);
dev_type_poll(drm_poll);
dev_type_mmap(drm_mmap);
#endif
extern drm_local_map_t *drm_getsarea(drm_device_t *dev);
/* File operations helpers (drm_fops.c) */
#ifdef __FreeBSD__
@ -915,6 +915,7 @@ int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
/* Scatter Gather Support (drm_scatter.c) */
void drm_sg_cleanup(drm_sg_mem_t *entry);
int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request);
#ifdef __FreeBSD__
/* sysctl support (drm_sysctl.h) */
@ -989,7 +990,7 @@ int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS);
int drm_agp_bind_ioctl(DRM_IOCTL_ARGS);
/* Scatter Gather Support (drm_scatter.c) */
int drm_sg_alloc(DRM_IOCTL_ARGS);
int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS);
int drm_sg_free(DRM_IOCTL_ARGS);
/* consistent PCI memory functions (drm_pci.c) */

@ -43,7 +43,7 @@ static int
drm_device_find_capability(drm_device_t *dev, int cap)
{
#ifdef __FreeBSD__
#if __FreeBSD_version >= 700010
#if __FreeBSD_version >= 602102
return (pci_find_extcap(dev->device, cap, NULL) == 0);
#else

@ -117,7 +117,7 @@ static drm_ioctl_desc_t drm_ioctls[256] = {
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { drm_wait_vblank, 0 },
@ -912,6 +912,18 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
return DRM_ERR(retcode);
}
drm_local_map_t *drm_getsarea(drm_device_t *dev)
{
drm_local_map_t *map;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
return map;
}
return NULL;
}
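This replaces the DRM_GETSAREA macro deleted from drmP.h above with a proper function, mirroring the Linux side. A driver that previously expanded the macro would now do roughly this (hypothetical snippet, not part of the diff):

/* Hedged sketch of a driver init path using the new helper; the
 * function asserts dev_lock is held, hence the explicit spinlock. */
static int drm_driver_find_sarea(drm_device_t *dev, drm_local_map_t **sarea)
{
	DRM_SPINLOCK(&dev->dev_lock);
	*sarea = drm_getsarea(dev);
	DRM_SPINUNLOCK(&dev->dev_lock);
	return (*sarea != NULL) ? 0 : EINVAL;
}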
#if DRM_LINUX

@ -40,28 +40,20 @@ void drm_sg_cleanup(drm_sg_mem_t *entry)
free(entry, M_DRM);
}
int drm_sg_alloc(DRM_IOCTL_ARGS)
int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request)
{
DRM_DEVICE;
drm_scatter_gather_t request;
drm_sg_mem_t *entry;
unsigned long pages;
int i;
DRM_DEBUG( "%s\n", __FUNCTION__ );
if ( dev->sg )
return EINVAL;
DRM_COPY_FROM_USER_IOCTL(request, (drm_scatter_gather_t *)data,
sizeof(request) );
entry = malloc(sizeof(*entry), M_DRM, M_WAITOK | M_ZERO);
if ( !entry )
return ENOMEM;
pages = round_page(request.size) / PAGE_SIZE;
DRM_DEBUG( "sg size=%ld pages=%ld\n", request.size, pages );
DRM_DEBUG( "sg size=%ld pages=%ld\n", request->size, pages );
entry->pages = pages;
@ -86,11 +78,7 @@ int drm_sg_alloc(DRM_IOCTL_ARGS)
DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
entry->virtual = (void *)entry->handle;
request.handle = entry->handle;
DRM_COPY_TO_USER_IOCTL( (drm_scatter_gather_t *)data,
request,
sizeof(request) );
request->handle = entry->handle;
DRM_LOCK();
if (dev->sg) {
@ -101,6 +89,27 @@ int drm_sg_alloc(DRM_IOCTL_ARGS)
dev->sg = entry;
DRM_UNLOCK();
}
int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_scatter_gather_t request;
int ret;
DRM_DEBUG( "%s\n", __FUNCTION__ );
DRM_COPY_FROM_USER_IOCTL(request, (drm_scatter_gather_t *)data,
sizeof(request) );
ret = drm_sg_alloc(dev, &request);
if ( ret ) return ret;
DRM_COPY_TO_USER_IOCTL( (drm_scatter_gather_t *)data,
request,
sizeof(request) );
return 0;
}
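The split follows a pattern applied across this merge: a core function taking kernel arguments (drm_sg_alloc) plus a thin ioctl wrapper that only does the user copies. That lets driver code allocate scatter/gather memory internally, roughly like this (hypothetical in-kernel caller, not part of the diff):

/* Hedged sketch: an in-kernel caller of the new core entry point;
 * no DRM_COPY_{FROM,TO}_USER_IOCTL round trip is needed. */
static int drm_driver_alloc_sg(drm_device_t *dev, unsigned long size,
    unsigned long *handle)
{
	drm_scatter_gather_t request;
	int ret;

	request.size = size;
	ret = drm_sg_alloc(dev, &request);
	if (ret == 0)
		*handle = request.handle;
	return ret;
}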

File diff suppressed because it is too large

@ -60,7 +60,7 @@ typedef struct _drmMMListHead
(__item)->next = (__item); \
} while (0)
#define DRMLISTADD(__item, __list) \
#define DRMLISTADD(__item, __list) \
do { \
(__item)->prev = (__list); \
(__item)->next = (__list)->next; \
@ -93,16 +93,18 @@ typedef struct _drmMMListHead
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
typedef struct _drmFence{
unsigned handle;
int class;
unsigned type;
unsigned flags;
unsigned signaled;
unsigned pad[4]; /* for future expansion */
typedef struct _drmFence
{
unsigned handle;
int class;
unsigned type;
unsigned flags;
unsigned signaled;
unsigned pad[4]; /* for future expansion */
} drmFence;
typedef struct _drmBO{
typedef struct _drmBO
{
drm_bo_type_t type;
unsigned handle;
drm_u64_t mapHandle;
@ -124,8 +126,8 @@ typedef struct _drmBO{
unsigned pad[8]; /* for future expansion */
} drmBO;
typedef struct _drmBONode {
typedef struct _drmBONode
{
drmMMListHead head;
drmBO *buf;
struct drm_bo_op_arg bo_arg;
@ -141,22 +143,24 @@ typedef struct _drmBOList {
drmMMListHead free;
} drmBOList;
/* Fencing */
extern int drmFenceCreate(int fd, unsigned flags, int class,
unsigned type,
drmFence *fence);
extern int drmFenceDestroy(int fd, const drmFence *fence);
extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
extern int drmFenceUnreference(int fd, const drmFence *fence);
extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
extern int drmFenceSignaled(int fd, drmFence *fence,
unsigned fenceType, int *signaled);
extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
unsigned flush_type);
extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
unsigned emit_type);
extern int drmFenceBuffers(int fd, unsigned flags, drmFence *fence);
/*
* Fence functions.
*/
extern int drmFenceCreate(int fd, unsigned flags, int class,
unsigned type, drmFence *fence);
extern int drmFenceDestroy(int fd, const drmFence *fence);
extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
extern int drmFenceUnreference(int fd, const drmFence *fence);
extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
extern int drmFenceSignaled(int fd, drmFence *fence,
unsigned fenceType, int *signaled);
extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
unsigned flush_type);
extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
unsigned emit_type);
extern int drmFenceBuffers(int fd, unsigned flags, drmFence *fence);
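For userspace callers, the declarations above combine into a simple create/wait/unreference cycle. A sketch under stated assumptions: fd is an open DRM device, DRM_FENCE_FLAG_EMIT and DRM_FENCE_TYPE_EXE are the flag and type names from the drm.h of this era, and fence class/type values are ultimately driver-specific:

/* Hedged sketch: emit a fence on class 0 and block until the
 * execution type signals, then drop the reference. */
static int wait_for_engine_idle(int fd)
{
	drmFence fence;
	int ret;

	ret = drmFenceCreate(fd, DRM_FENCE_FLAG_EMIT, 0,
	                     DRM_FENCE_TYPE_EXE, &fence);
	if (ret)
		return ret;
	ret = drmFenceWait(fd, 0, &fence, DRM_FENCE_TYPE_EXE);
	drmFenceUnreference(fd, &fence);
	return ret;
}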
/*

@ -283,6 +283,7 @@ CONFIG_DRM_SAVAGE := n
CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
# Enable module builds for the modules requested/supported.

@ -21,12 +21,14 @@ i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \
nv40_graph.o
nv40_graph.o nv50_graph.o \
nv04_instmem.o nv50_instmem.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o

@ -151,7 +151,8 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
order = drm_order((gart_info->table_size +
(PAGE_SIZE-1)) / PAGE_SIZE);
num_pages = 1 << order;
address = drm_ati_alloc_pcigart_table(order);
if (!address) {
@ -169,7 +170,8 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
PCI_DMA_TODEVICE);
if (bus_address == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
order = drm_order((gart_info->table_size +
(PAGE_SIZE-1)) / PAGE_SIZE);
drm_ati_free_pcigart_table(address, order);
address = NULL;
goto done;

@ -76,6 +76,7 @@
#include <asm/pgalloc.h>
#include "drm.h"
#include <linux/slab.h>
#include <linux/idr.h>
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@ -300,19 +301,14 @@ typedef struct drm_devstate {
} drm_devstate_t;
typedef struct drm_magic_entry {
drm_hash_item_t hash_item;
struct list_head head;
drm_hash_item_t hash_item;
struct drm_file *priv;
} drm_magic_entry_t;
typedef struct drm_magic_head {
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
struct list_head head;
struct vm_area_struct *vma;
struct drm_vma_entry *next;
pid_t pid;
} drm_vma_entry_t;
@ -411,8 +407,7 @@ typedef struct drm_file {
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct drm_file *next;
struct drm_file *prev;
struct list_head lhead;
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
@ -493,8 +488,7 @@ typedef struct drm_agp_mem {
DRM_AGP_MEM *memory;
unsigned long bound; /**< address */
int pages;
struct drm_agp_mem *prev; /**< previous entry */
struct drm_agp_mem *next; /**< next entry */
struct list_head head;
} drm_agp_mem_t;
/**
@ -504,7 +498,7 @@ typedef struct drm_agp_mem {
*/
typedef struct drm_agp_head {
DRM_AGP_KERN agp_info; /**< AGP device information */
drm_agp_mem_t *memory; /**< memory entries */
struct list_head memory;
unsigned long mode; /**< AGP mode */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
struct agp_bridge_data *bridge;
@ -600,7 +594,6 @@ typedef struct ati_pcigart_info {
int table_size;
} drm_ati_pcigart_info;
#include "drm_objects.h"
/**
@ -726,15 +719,14 @@ typedef struct drm_device {
/** \name Authentication */
/*@{ */
drm_file_t *file_first; /**< file list head */
drm_file_t *file_last; /**< file list tail */
struct list_head filelist;
drm_open_hash_t magiclist;
struct list_head magicfree;
/*@} */
/** \name Memory management */
/*@{ */
drm_map_list_t *maplist; /**< Linked list of regions */
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
drm_open_hash_t map_hash; /**< User token hash table for maps */
drm_mm_t offset_manager; /**< User token manager */
@ -744,14 +736,13 @@ typedef struct drm_device {
/** \name Context handle management */
/*@{ */
drm_ctx_list_t *ctxlist; /**< Linked list of context handles */
struct list_head ctxlist; /**< Linked list of context handles */
int ctx_count; /**< Number of context handles */
struct mutex ctxlist_mutex; /**< For ctxlist */
drm_map_t **context_sareas; /**< per-context SAREA's */
int max_context;
struct idr ctx_idr;
drm_vma_entry_t *vmalist; /**< List of vmas (for debugging) */
struct list_head vmalist; /**< List of vmas (for debugging) */
drm_lock_data_t lock; /**< Information on hardware lock */
/*@} */
@ -787,8 +778,8 @@ typedef struct drm_device {
atomic_t vbl_received;
atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs; /**< signal list to send on VBLANK */
drm_vbl_sig_t vbl_sigs2; /**< signals to send on secondary VBLANK */
struct list_head vbl_sigs; /**< signal list to send on VBLANK */
struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
unsigned int vbl_pending;
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
@ -810,7 +801,6 @@ typedef struct drm_device {
struct pci_controller *hose;
#endif
drm_sg_mem_t *sg; /**< Scatter gather memory */
unsigned long *ctx_bitmap; /**< context bitmap */
void *dev_private; /**< device private data */
drm_sigdata_t sigdata; /**< For block_all_signals */
sigset_t sigmask;
@ -826,10 +816,7 @@ typedef struct drm_device {
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
unsigned int drw_bitfield_length;
u32 *drw_bitfield;
unsigned int drw_info_length;
drm_drawable_info_t **drw_info;
struct idr drw_idr;
/*@} */
} drm_device_t;
@ -1007,6 +994,7 @@ extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
drm_drawable_t id);
extern void drm_drawable_free_all(drm_device_t *dev);
/* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct inode *inode, struct file *filp,
@ -1129,9 +1117,7 @@ extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
extern int drm_wait_on(drm_device_t *dev, wait_queue_head_t *queue,
int timeout, int (*fn)(drm_device_t *dev, void *priv),
void *priv);
/* Proc support (drm_proc.h) */
extern int drm_proc_init(drm_device_t * dev,
int minor,
@ -1143,8 +1129,9 @@ extern int drm_proc_cleanup(int minor,
/* Scatter Gather Support (drm_scatter.h) */
extern void drm_sg_cleanup(drm_sg_mem_t * entry);
extern int drm_sg_alloc(struct inode *inode, struct file *filp,
extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_sg_alloc(drm_device_t *dev, drm_scatter_gather_t * request);
extern int drm_sg_free(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@ -1194,7 +1181,7 @@ static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
{
drm_map_list_t *_entry;
list_for_each_entry(_entry, &dev->maplist->head, head)
list_for_each_entry(_entry, &dev->maplist, head)
if (_entry->user_token == token)
return _entry->map;
return NULL;

@ -249,11 +249,7 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
entry->prev = NULL;
entry->next = dev->agp->memory;
if (dev->agp->memory)
dev->agp->memory->prev = entry;
dev->agp->memory = entry;
list_add(&entry->head, &dev->agp->memory);
request->handle = entry->handle;
request->physical = memory->physical;
@ -280,10 +276,12 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
return err;
if (copy_to_user(argp, &request, sizeof(request))) {
drm_agp_mem_t *entry = dev->agp->memory;
dev->agp->memory = entry->next;
dev->agp->memory->prev = NULL;
drm_agp_mem_t *entry;
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == request.handle)
break;
}
list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return -EFAULT;
@ -306,7 +304,7 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
{
drm_agp_mem_t *entry;
for (entry = dev->agp->memory; entry; entry = entry->next) {
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
@ -435,13 +433,7 @@ int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
if (entry->bound)
drm_unbind_agp(entry->memory);
if (entry->prev)
entry->prev->next = entry->next;
else
dev->agp->memory = entry->next;
if (entry->next)
entry->next->prev = entry->prev;
list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
@ -502,7 +494,7 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
return NULL;
}
head->memory = NULL;
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
return head;
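This file shows the conversion idiom used throughout the merge: hand-rolled prev/next fields replaced by an embedded struct list_head, with INIT_LIST_HEAD for the empty list, list_add/list_del for splicing, and list_for_each_entry for walks. The lookup, reduced to a generic sketch (names hypothetical):

#include <linux/list.h>

/* Generic shape of the conversion above: embed the linkage in the
 * entry and let the list helpers replace open-coded pointer surgery. */
struct example_entry {
	struct list_head head;		/* was: *prev, *next */
	unsigned long handle;
};

static struct example_entry *example_lookup(struct list_head *list,
					    unsigned long handle)
{
	struct example_entry *entry;

	list_for_each_entry(entry, list, head) {
		if (entry->handle == handle)
			return entry;
	}
	return NULL;			/* was: walking entry->next to NULL */
}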

@ -67,6 +67,9 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
{
drm_mem_type_manager_t *man;
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
DRM_ASSERT_LOCKED(&bo->mutex);
man = &bo->dev->bm.man[bo->pinned_mem_type];
list_add_tail(&bo->pinned_lru, &man->pinned);
}
@ -75,6 +78,8 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo)
{
drm_mem_type_manager_t *man;
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
|| bo->mem.mem_type != bo->pinned_mem_type) {
man = &bo->dev->bm.man[bo->mem.mem_type];
@ -134,6 +139,8 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo)
int ret = 0;
bo->ttm = NULL;
DRM_ASSERT_LOCKED(&bo->mutex);
switch (bo->type) {
case drm_bo_type_dc:
bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
@ -262,29 +269,25 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
int no_wait)
{
drm_fence_object_t *fence = bo->fence;
int ret;
if (fence) {
drm_device_t *dev = bo->dev;
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
DRM_ASSERT_LOCKED(&bo->mutex);
if (bo->fence) {
if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
if (no_wait) {
return -EBUSY;
}
ret =
drm_fence_object_wait(dev, fence, lazy, ignore_signals,
drm_fence_object_wait(bo->fence, lazy, ignore_signals,
bo->fence_type);
if (ret)
return ret;
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
drm_fence_usage_deref_unlocked(&bo->fence);
}
return 0;
}
@ -312,10 +315,8 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
"Evicting buffer.\n");
}
}
if (bo->fence) {
drm_fence_usage_deref_unlocked(dev, bo->fence);
bo->fence = NULL;
}
if (bo->fence)
drm_fence_usage_deref_unlocked(&bo->fence);
}
return 0;
}
@ -331,16 +332,17 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
atomic_inc(&bo->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&bo->mutex);
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
drm_fence_usage_deref_locked(dev, bo->fence);
bo->fence = NULL;
}
if (bo->fence && drm_fence_object_signaled(bo->fence,
bo->fence_type, 0))
drm_fence_usage_deref_unlocked(&bo->fence);
if (bo->fence && remove_all)
(void)drm_bo_expire_fence(bo, 0);
@ -371,7 +373,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
}
if (list_empty(&bo->ddestroy)) {
drm_fence_object_flush(dev, bo->fence, bo->fence_type);
drm_fence_object_flush(bo->fence, bo->fence_type);
list_add_tail(&bo->ddestroy, &bm->ddestroy);
schedule_delayed_work(&bm->wq,
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
@ -392,6 +394,8 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
@ -414,6 +418,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
atomic_dec(&bm->count);
BUG_ON(!list_empty(&bo->base.list));
drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
return;
@ -488,10 +493,15 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo)
{
if (atomic_dec_and_test(&bo->usage)) {
drm_bo_destroy_locked(bo);
struct drm_buffer_object *tmp_bo = *bo;
*bo = NULL;
DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
if (atomic_dec_and_test(&tmp_bo->usage)) {
drm_bo_destroy_locked(tmp_bo);
}
}
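The new double-pointer signature is the point of this hunk: the deref function consumes the caller's reference and clears the caller's pointer before the refcount can hit zero, so a stale bo pointer cannot be used after the final unref. The same shape is applied to the fence code later in this merge. Reduced to a generic sketch:

/* Generic shape of the deref-and-clear idiom introduced here. */
struct obj {
	atomic_t usage;
	/* ... */
};

static void obj_destroy(struct obj *o);	/* stand-in destructor */

static void obj_unref(struct obj **pobj)
{
	struct obj *tmp = *pobj;

	*pobj = NULL;			/* caller's pointer dies first */
	if (atomic_dec_and_test(&tmp->usage))
		obj_destroy(tmp);	/* that was the last reference */
}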
@ -500,18 +510,22 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
drm_buffer_object_t *bo =
drm_user_object_entry(uo, drm_buffer_object_t, base);
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
drm_bo_takedown_vm_locked(bo);
drm_bo_usage_deref_locked(bo);
drm_bo_usage_deref_locked(&bo);
}
static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo)
{
drm_device_t *dev = bo->dev;
struct drm_buffer_object *tmp_bo = *bo;
drm_device_t *dev = tmp_bo->dev;
if (atomic_dec_and_test(&bo->usage)) {
*bo = NULL;
if (atomic_dec_and_test(&tmp_bo->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&bo->usage) == 0)
drm_bo_destroy_locked(bo);
if (atomic_read(&tmp_bo->usage) == 0)
drm_bo_destroy_locked(tmp_bo);
mutex_unlock(&dev->struct_mutex);
}
}
@ -597,18 +611,17 @@ int drm_fence_buffer_objects(drm_file_t * priv,
if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
count++;
if (entry->fence)
drm_fence_usage_deref_locked(dev, entry->fence);
entry->fence = fence;
drm_fence_usage_deref_locked(&entry->fence);
entry->fence = drm_fence_reference_locked(fence);
DRM_FLAG_MASKED(entry->priv_flags, 0,
_DRM_BO_FLAG_UNFENCED);
DRM_WAKEUP(&entry->event_queue);
drm_bo_add_to_lru(entry);
}
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_locked(entry);
drm_bo_usage_deref_locked(&entry);
l = f_list.next;
}
atomic_add(count, &fence->usage);
DRM_DEBUG("Fenced %d buffers\n", count);
out:
mutex_unlock(&dev->struct_mutex);
@ -723,7 +736,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev,
ret = drm_bo_evict(entry, mem_type, no_wait);
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_unlocked(entry);
drm_bo_usage_deref_unlocked(&entry);
if (ret)
return ret;
mutex_lock(&dev->struct_mutex);
@ -943,10 +956,8 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo)
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
drm_device_t *dev = bo->dev;
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
return 1;
@ -965,16 +976,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo)
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
drm_device_t *dev = bo->dev;
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
return 1;
@ -1171,7 +1179,7 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
drm_bo_fill_rep_arg(bo, rep);
out:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(bo);
drm_bo_usage_deref_unlocked(&bo);
return ret;
}
@ -1197,7 +1205,7 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
}
drm_remove_ref_object(priv, ro);
drm_bo_usage_deref_locked(bo);
drm_bo_usage_deref_locked(&bo);
out:
mutex_unlock(&dev->struct_mutex);
return ret;
@ -1487,11 +1495,14 @@ static int drm_bo_handle_validate(drm_file_t * priv,
uint64_t flags, uint64_t mask, uint32_t hint,
struct drm_bo_info_rep *rep)
{
struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
int ret;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo) {
return -EINVAL;
}
@ -1517,16 +1528,20 @@ static int drm_bo_handle_validate(drm_file_t * priv,
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(bo);
drm_bo_usage_deref_unlocked(&bo);
return ret;
}
static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle,
struct drm_bo_info_rep *rep)
{
struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo) {
return -EINVAL;
}
@ -1535,7 +1550,7 @@ static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle,
(void)drm_bo_busy(bo);
drm_bo_fill_rep_arg(bo, rep);
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(bo);
drm_bo_usage_deref_unlocked(&bo);
return 0;
}
@ -1543,11 +1558,15 @@ static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle,
uint32_t hint,
struct drm_bo_info_rep *rep)
{
struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
int ret;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo) {
return -EINVAL;
}
@ -1564,7 +1583,7 @@ static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle,
out:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(bo);
drm_bo_usage_deref_unlocked(&bo);
return ret;
}
@ -1649,7 +1668,7 @@ int drm_buffer_object_create(drm_device_t *dev,
out_err:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(bo);
drm_bo_usage_deref_unlocked(&bo);
return ret;
}
@ -1774,7 +1793,7 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS)
ret = drm_bo_add_user_object(priv, entry,
req->mask & DRM_BO_FLAG_SHAREABLE);
if (ret) {
drm_bo_usage_deref_unlocked(entry);
drm_bo_usage_deref_unlocked(&entry);
goto out;
}
@ -2090,7 +2109,7 @@ restart:
allow_errors);
mutex_lock(&dev->struct_mutex);
drm_bo_usage_deref_locked(entry);
drm_bo_usage_deref_locked(&entry);
if (ret)
return ret;
@ -2100,10 +2119,8 @@ restart:
do_restart = ((next->prev != list) && (next->prev != prev));
if (nentry != NULL && do_restart) {
drm_bo_usage_deref_locked(nentry);
nentry = NULL;
}
if (nentry != NULL && do_restart)
drm_bo_usage_deref_locked(&nentry);
if (do_restart)
goto restart;
@ -2556,6 +2573,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
drm_local_map_t *map;
drm_device_t *dev = bo->dev;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
if (list->user_token) {
drm_ht_remove_item(&dev->map_hash, &list->hash);
list->user_token = 0;
@ -2572,7 +2590,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
list->map = NULL;
list->user_token = 0ULL;
drm_bo_usage_deref_locked(bo);
drm_bo_usage_deref_locked(&bo);
}
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
@ -2581,6 +2599,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
drm_local_map_t *map;
drm_device_t *dev = bo->dev;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
if (!list->map)
return -ENOMEM;

@ -306,7 +306,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo,
INIT_LIST_HEAD(&fbo->p_mm_list);
#endif
atomic_inc(&bo->fence->usage);
drm_fence_reference_unlocked(&fbo->fence, bo->fence);
fbo->pinned_node = NULL;
fbo->mem.mm_node->private = (void *)fbo;
atomic_set(&fbo->usage, 1);
@ -339,7 +339,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
drm_buffer_object_t *old_obj;
if (bo->fence)
drm_fence_usage_deref_unlocked(dev, bo->fence);
drm_fence_usage_deref_unlocked(&bo->fence);
ret = drm_fence_object_create(dev, fence_class, fence_type,
fence_flags | DRM_FENCE_FLAG_EMIT,
&bo->fence);
@ -396,7 +396,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
drm_bo_add_to_lru(old_obj);
drm_bo_usage_deref_locked(old_obj);
drm_bo_usage_deref_locked(&old_obj);
mutex_unlock(&dev->struct_mutex);
}

@ -51,10 +51,8 @@ EXPORT_SYMBOL(drm_get_resource_len);
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
drm_local_map_t *map)
{
struct list_head *list;
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
drm_map_list_t *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
@ -237,14 +235,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
* skipped and we double check that dev->agp->memory is
* actually set as well as being invalid before EPERM'ing
*/
for (entry = dev->agp->memory; entry; entry = entry->next) {
list_for_each_entry(entry, &dev->agp->memory, head) {
if ((map->offset >= entry->bound) &&
(map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (dev->agp->memory && !valid) {
if (!list_empty(&dev->agp->memory) && !valid) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EPERM;
}
@ -288,7 +286,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
list->map = map;
mutex_lock(&dev->struct_mutex);
list_add(&list->head, &dev->maplist->head);
list_add(&list->head, &dev->maplist);
/* Assign a 32-bit handle */
@ -380,29 +378,28 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
*/
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
struct list_head *list;
drm_map_list_t *r_list = NULL;
drm_map_list_t *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
int found = 0;
/* Find the list entry for the map and remove it */
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
list_del(list);
list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
found = 1;
break;
}
}
if (!found) {
return -EINVAL;
}
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
if (list == (&dev->maplist->head)) {
return -EINVAL;
}
switch (map->type) {
case _DRM_REGISTERS:
@ -464,7 +461,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->head->dev;
drm_map_t request;
drm_local_map_t *map = NULL;
struct list_head *list;
drm_map_list_t *r_list;
int ret;
if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
@ -472,9 +469,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->user_token == (unsigned long)request.handle &&
r_list->map->flags & _DRM_REMOVABLE) {
@ -486,7 +481,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
if (list == (&dev->maplist->head)) {
if (list_empty(&dev->maplist) || !map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@ -610,14 +605,14 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
list_for_each_entry(agp_entry, &dev->agp->memory, head) {
if ((agp_offset >= agp_entry->bound) &&
(agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (dev->agp->memory && !valid) {
if (!list_empty(&dev->agp->memory) && !valid) {
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}

@ -556,3 +556,126 @@ void drm_bo_finish_unmap(drm_buffer_object_t *bo)
#endif
#ifdef DRM_IDR_COMPAT_FN
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
p->ary[0] = idp->id_free;
idp->id_free = p;
idp->id_free_cnt++;
}
static void free_layer(struct idr *idp, struct idr_layer *p)
{
unsigned long flags;
/*
* Depends on the return element being zeroed.
*/
spin_lock_irqsave(&idp->lock, flags);
__free_layer(idp, p);
spin_unlock_irqrestore(&idp->lock, flags);
}
/**
* idr_for_each - iterate through all stored pointers
* @idp: idr handle
* @fn: function to be called for each pointer
* @data: data passed back to callback function
*
* Iterate over the pointers registered with the given idr. The
* callback function will be called for each pointer currently
* registered, passing the id, the pointer and the data pointer passed
* to this function. It is not safe to modify the idr tree while in
* the callback, so functions such as idr_get_new and idr_remove are
* not allowed.
*
* We check the return of @fn each time. If it returns anything other
* than 0, we break out and return that value.
*
* The caller must serialize idr_find() vs idr_get_new() and idr_remove().
*/
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data)
{
int n, id, max, error = 0;
struct idr_layer *p;
struct idr_layer *pa[MAX_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = idp->top;
max = 1 << n;
id = 0;
while (id < max) {
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
p = p->ary[(id >> n) & IDR_MASK];
}
if (p) {
error = fn(id, (void *)p, data);
if (error)
break;
}
id += 1 << n;
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
}
}
return error;
}
EXPORT_SYMBOL(idr_for_each);
/**
* idr_remove_all - remove all ids from the given idr tree
* @idp: idr handle
*
* idr_destroy() only frees up unused, cached idp_layers, but this
* function will remove all id mappings and leave all idp_layers
* unused.
*
* A typical clean-up sequence for objects stored in an idr tree, will
* use idr_for_each() to free all objects, if necessary, then
* idr_remove_all() to remove all ids, and idr_destroy() to free
* up the cached idr_layers.
*/
void idr_remove_all(struct idr *idp)
{
int n, id, max, error = 0;
struct idr_layer *p;
struct idr_layer *pa[MAX_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = idp->top;
max = 1 << n;
id = 0;
while (id < max && !error) {
while (n > IDR_BITS && p) {
n -= IDR_BITS;
*paa++ = p;
p = p->ary[(id >> n) & IDR_MASK];
}
id += 1 << n;
while (n < fls(id)) {
if (p) {
memset(p, 0, sizeof *p);
free_layer(idp, p);
}
n += IDR_BITS;
p = *--paa;
}
}
idp->top = NULL;
idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
#endif
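Tying the two compat helpers together, the clean-up sequence the idr_remove_all() comment describes comes down to three calls (sketch; free_cb and the kfree destructor are stand-ins for whatever the stored objects actually need):

#include <linux/idr.h>
#include <linux/slab.h>

/* Hedged sketch of the documented teardown order: free the stored
 * objects, drop every id mapping, then release the cached layers. */
static int free_cb(int id, void *p, void *data)
{
	kfree(p);
	return 0;		/* non-zero would abort the walk */
}

static void example_idr_teardown(struct idr *idp)
{
	idr_for_each(idp, free_cb, NULL);
	idr_remove_all(idp);
	idr_destroy(idp);
}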

@ -305,4 +305,13 @@ extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
extern int drm_bo_map_bound(struct vm_area_struct *vma);
#endif
/* fixme when functions are upstreamed */
#define DRM_IDR_COMPAT_FN
#ifdef DRM_IDR_COMPAT_FN
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data);
void idr_remove_all(struct idr *idp);
#endif
#endif

@ -53,26 +53,14 @@
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
* in drm_device::context_sareas, while holding the drm_device::struct_mutex
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
{
if (ctx_handle < 0)
goto failed;
if (!dev->ctx_bitmap)
goto failed;
if (ctx_handle < DRM_MAX_CTXBITMAP) {
mutex_lock(&dev->struct_mutex);
clear_bit(ctx_handle, dev->ctx_bitmap);
dev->context_sareas[ctx_handle] = NULL;
mutex_unlock(&dev->struct_mutex);
return;
}
failed:
DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
return;
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
}
/**
@ -81,62 +69,29 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
* \param dev DRM device.
* \return (non-negative) context handle on success or a negative number on failure.
*
* Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
* drm_device::context_sareas to accommodate the new entry while holding the
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
static int drm_ctxbitmap_next(drm_device_t * dev)
{
int bit;
int new_id;
int ret;
if (!dev->ctx_bitmap)
return -1;
mutex_lock(&dev->struct_mutex);
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit < DRM_MAX_CTXBITMAP) {
set_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
if ((bit + 1) > dev->max_context) {
dev->max_context = (bit + 1);
if (dev->context_sareas) {
drm_map_t **ctx_sareas;
ctx_sareas = drm_realloc(dev->context_sareas,
(dev->max_context -
1) *
sizeof(*dev->
context_sareas),
dev->max_context *
sizeof(*dev->
context_sareas),
DRM_MEM_MAPS);
if (!ctx_sareas) {
clear_bit(bit, dev->ctx_bitmap);
mutex_unlock(&dev->struct_mutex);
return -1;
}
dev->context_sareas = ctx_sareas;
dev->context_sareas[bit] = NULL;
} else {
/* max_context == 1 at this point */
dev->context_sareas =
drm_alloc(dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if (!dev->context_sareas) {
clear_bit(bit, dev->ctx_bitmap);
mutex_unlock(&dev->struct_mutex);
return -1;
}
dev->context_sareas[bit] = NULL;
}
}
mutex_unlock(&dev->struct_mutex);
return bit;
again:
if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
return -ENOMEM;
}
mutex_lock(&dev->struct_mutex);
ret = idr_get_new_above(&dev->ctx_idr, NULL,
DRM_RESERVED_CONTEXTS, &new_id);
if (ret == -EAGAIN) {
mutex_unlock(&dev->struct_mutex);
goto again;
}
mutex_unlock(&dev->struct_mutex);
return -1;
return new_id;
}
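The pre-get/get-new pairing above is the standard idr idiom of this kernel era: idr_pre_get() preloads layer memory with the lock dropped, and idr_get_new_above() may still return -EAGAIN if another thread consumed the preloaded layers, hence the retry. The generic shape (sketch; the spinlock stands in for whatever lock guards the tree):

/* Hedged sketch of the allocation idiom used above and in
 * drm_adddraw() below: preload outside the lock, retry on -EAGAIN. */
static int example_alloc_id(struct idr *idp, void *ptr,
			    int starting_id, spinlock_t *lock)
{
	int id, ret;

again:
	if (idr_pre_get(idp, GFP_KERNEL) == 0)
		return -ENOMEM;

	spin_lock(lock);
	ret = idr_get_new_above(idp, ptr, starting_id, &id);
	spin_unlock(lock);
	if (ret == -EAGAIN)
		goto again;

	return ret ? ret : id;
}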
/**
@ -144,31 +99,11 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
*
* \param dev DRM device.
*
* Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
* the drm_device::struct_mutex lock.
* Initialise the drm_device::ctx_idr
*/
int drm_ctxbitmap_init(drm_device_t * dev)
{
int i;
int temp;
mutex_lock(&dev->struct_mutex);
dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
DRM_MEM_CTXBITMAP);
if (dev->ctx_bitmap == NULL) {
mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
}
memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
dev->context_sareas = NULL;
dev->max_context = -1;
mutex_unlock(&dev->struct_mutex);
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
}
idr_init(&dev->ctx_idr);
return 0;
}
@ -177,17 +112,13 @@ int drm_ctxbitmap_init(drm_device_t * dev)
*
* \param dev DRM device.
*
* Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
* the drm_device::struct_mutex lock.
* Free all idr members while holding the drm_device::struct_mutex lock.
*/
void drm_ctxbitmap_cleanup(drm_device_t * dev)
{
mutex_lock(&dev->struct_mutex);
if (dev->context_sareas)
drm_free(dev->context_sareas,
sizeof(*dev->context_sareas) *
dev->max_context, DRM_MEM_MAPS);
drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
idr_remove_all(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
@ -206,7 +137,7 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev)
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
* Gets the map from drm_device::context_sareas with the handle specified and
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
int drm_getsareactx(struct inode *inode, struct file *filp,
@ -223,17 +154,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
return -EFAULT;
mutex_lock(&dev->struct_mutex);
if (dev->max_context < 0
|| request.ctx_id >= (unsigned)dev->max_context) {
map = idr_find(&dev->ctx_idr, request.ctx_id);
if (!map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
map = dev->context_sareas[request.ctx_id];
mutex_unlock(&dev->struct_mutex);
request.handle = NULL;
list_for_each_entry(_entry, &dev->maplist->head,head) {
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request.handle =
(void *)(unsigned long)_entry->user_token;
@ -258,7 +189,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
* \return zero on success or a negative number on failure.
*
* Searches the mapping specified in \p arg and update the entry in
* drm_device::context_sareas with it.
* drm_device::ctx_idr with it.
*/
int drm_setsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
@ -268,15 +199,13 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list = NULL;
struct list_head *list;
if (copy_from_user(&request,
(drm_ctx_priv_map_t __user *) arg, sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long) request.handle)
goto found;
@ -289,12 +218,12 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
map = r_list->map;
if (!map)
goto bad;
if (dev->max_context < 0)
if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id)))
goto bad;
if (request.ctx_id >= (unsigned)dev->max_context)
goto bad;
dev->context_sareas[request.ctx_id] = map;
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -449,7 +378,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
ctx_entry->tag = priv;
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist->head);
list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
@ -575,10 +504,10 @@ int drm_rmctx(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist->head)) {
if (!list_empty(&dev->ctxlist)) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx.handle) {
list_del(&pos->head);
drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);

@ -44,82 +44,29 @@ int drm_adddraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
unsigned long irqflags;
int i, j;
u32 *bitfield = dev->drw_bitfield;
unsigned int bitfield_length = dev->drw_bitfield_length;
drm_drawable_info_t **info = dev->drw_info;
unsigned int info_length = dev->drw_info_length;
drm_draw_t draw;
int new_id = 0;
int ret;
for (i = 0, j = 0; i < bitfield_length; i++) {
if (bitfield[i] == ~0)
continue;
for (; j < 8 * sizeof(*bitfield); j++)
if (!(bitfield[i] & (1 << j)))
goto done;
again:
if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
return -ENOMEM;
}
done:
if (i == bitfield_length) {
bitfield_length++;
bitfield = drm_alloc(bitfield_length * sizeof(*bitfield),
DRM_MEM_BUFS);
if (!bitfield) {
DRM_ERROR("Failed to allocate new drawable bitfield\n");
return DRM_ERR(ENOMEM);
}
if (8 * sizeof(*bitfield) * bitfield_length > info_length) {
info_length += 8 * sizeof(*bitfield);
info = drm_alloc(info_length * sizeof(*info),
DRM_MEM_BUFS);
if (!info) {
DRM_ERROR("Failed to allocate new drawable info"
" array\n");
drm_free(bitfield,
bitfield_length * sizeof(*bitfield),
DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
}
}
bitfield[i] = 0;
}
draw.handle = i * 8 * sizeof(*bitfield) + j + 1;
DRM_DEBUG("%d\n", draw.handle);
spin_lock_irqsave(&dev->drw_lock, irqflags);
bitfield[i] |= 1 << j;
info[draw.handle - 1] = NULL;
if (bitfield != dev->drw_bitfield) {
memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length *
sizeof(*bitfield));
drm_free(dev->drw_bitfield, sizeof(*bitfield) *
dev->drw_bitfield_length, DRM_MEM_BUFS);
dev->drw_bitfield = bitfield;
dev->drw_bitfield_length = bitfield_length;
}
if (info != dev->drw_info) {
memcpy(info, dev->drw_info, dev->drw_info_length *
sizeof(*info));
drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length,
DRM_MEM_BUFS);
dev->drw_info = info;
dev->drw_info_length = info_length;
ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
if (ret == -EAGAIN) {
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
goto again;
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
draw.handle = new_id;
DRM_DEBUG("%d\n", draw.handle);
DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));
return 0;
@ -132,122 +79,44 @@ int drm_rmdraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_draw_t draw;
int id, idx;
unsigned int shift;
unsigned long irqflags;
u32 *bitfield = dev->drw_bitfield;
unsigned int bitfield_length = dev->drw_bitfield_length;
drm_drawable_info_t **info = dev->drw_info;
unsigned int info_length = dev->drw_info_length;
DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
sizeof(draw));
id = draw.handle - 1;
idx = id / (8 * sizeof(*bitfield));
shift = id % (8 * sizeof(*bitfield));
if (idx < 0 || idx >= bitfield_length ||
!(bitfield[idx] & (1 << shift))) {
DRM_DEBUG("No such drawable %d\n", draw.handle);
return 0;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
bitfield[idx] &= ~(1 << shift);
drm_free(drm_get_drawable_info(dev, draw.handle),
sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
idr_remove(&dev->drw_idr, draw.handle);
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
if (info[id]) {
drm_free(info[id]->rects, info[id]->num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
drm_free(info[id], sizeof(**info), DRM_MEM_BUFS);
}
/* Can we shrink the arrays? */
if (idx == bitfield_length - 1) {
while (idx >= 0 && !bitfield[idx])
--idx;
bitfield_length = idx + 1;
if (idx != id / (8 * sizeof(*bitfield)))
bitfield = drm_alloc(bitfield_length *
sizeof(*bitfield), DRM_MEM_BUFS);
if (!bitfield && bitfield_length) {
bitfield = dev->drw_bitfield;
bitfield_length = dev->drw_bitfield_length;
}
}
if (bitfield != dev->drw_bitfield) {
info_length = 8 * sizeof(*bitfield) * bitfield_length;
info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS);
if (!info && info_length) {
info = dev->drw_info;
info_length = dev->drw_info_length;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
memcpy(bitfield, dev->drw_bitfield, bitfield_length *
sizeof(*bitfield));
drm_free(dev->drw_bitfield, sizeof(*bitfield) *
dev->drw_bitfield_length, DRM_MEM_BUFS);
dev->drw_bitfield = bitfield;
dev->drw_bitfield_length = bitfield_length;
if (info != dev->drw_info) {
memcpy(info, dev->drw_info, info_length *
sizeof(*info));
drm_free(dev->drw_info, sizeof(*info) *
dev->drw_info_length, DRM_MEM_BUFS);
dev->drw_info = info;
dev->drw_info_length = info_length;
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
}
DRM_DEBUG("%d\n", draw.handle);
return 0;
}
int drm_update_drawable_info(DRM_IOCTL_ARGS) {
int drm_update_drawable_info(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_update_draw_t update;
unsigned int id, idx, shift, bitfield_length = dev->drw_bitfield_length;
u32 *bitfield = dev->drw_bitfield;
unsigned long irqflags;
drm_drawable_info_t *info;
drm_clip_rect_t *rects;
struct drm_drawable_info *info;
int err;
DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
sizeof(update));
id = update.handle - 1;
idx = id / (8 * sizeof(*bitfield));
shift = id % (8 * sizeof(*bitfield));
if (idx < 0 || idx >= bitfield_length ||
!(bitfield[idx] & (1 << shift))) {
DRM_ERROR("No such drawable %d\n", update.handle);
return DRM_ERR(EINVAL);
}
info = dev->drw_info[id];
info = idr_find(&dev->drw_idr, update.handle);
if (!info) {
info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS);
if (!info) {
DRM_ERROR("Failed to allocate drawable info memory\n");
return DRM_ERR(ENOMEM);
info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
if (!info)
return -ENOMEM;
if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) {
DRM_ERROR("No such drawable %d\n", update.handle);
drm_free(info, sizeof(*info), DRM_MEM_BUFS);
return -EINVAL;
}
}
@ -284,12 +153,11 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
info->rects = rects;
info->num_rects = update.num;
dev->drw_info[id] = info;
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("Updated %d cliprects for drawable %d\n",
info->num_rects, id);
info->num_rects, update.handle);
break;
default:
DRM_ERROR("Invalid update type %d\n", update.type);
@ -299,11 +167,9 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
return 0;
error:
if (!dev->drw_info[id])
drm_free(info, sizeof(*info), DRM_MEM_BUFS);
else if (rects != dev->drw_info[id]->rects)
drm_free(rects, update.num *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
if (rects != info->rects)
drm_free(rects, update.num * sizeof(drm_clip_rect_t),
DRM_MEM_BUFS);
return err;
}
@ -311,20 +177,27 @@ error:
/**
* Caller must hold the drawable spinlock!
*/
drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
u32 *bitfield = dev->drw_bitfield;
unsigned int idx, shift;
id--;
idx = id / (8 * sizeof(*bitfield));
shift = id % (8 * sizeof(*bitfield));
if (idx < 0 || idx >= dev->drw_bitfield_length ||
!(bitfield[idx] & (1 << shift))) {
DRM_DEBUG("No such drawable %d\n", id);
return NULL;
}
return dev->drw_info[id];
drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id)
{
return idr_find(&dev->drw_idr, id);
}
EXPORT_SYMBOL(drm_get_drawable_info);
static int drm_drawable_free(int idr, void *p, void *data)
{
struct drm_drawable_info *info = p;
if (info) {
drm_free(info->rects, info->num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
drm_free(info, sizeof(*info), DRM_MEM_BUFS);
}
return 0;
}
void drm_drawable_free_all(drm_device_t *dev)
{
idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
idr_remove_all(&dev->drw_idr);
}

@ -113,7 +113,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
#endif
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
@ -171,8 +171,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
int drm_lastclose(drm_device_t * dev)
{
drm_magic_entry_t *pt, *next;
drm_map_list_t *r_list;
drm_vma_entry_t *vma, *vma_next;
drm_map_list_t *r_list, *list_t;
drm_vma_entry_t *vma, *vma_temp;
int i;
DRM_DEBUG("\n");
@ -197,18 +197,9 @@ int drm_lastclose(drm_device_t * dev)
drm_irq_uninstall(dev);
/* Free drawable information memory */
for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
i++) {
drm_drawable_info_t *info = drm_get_drawable_info(dev, i);
if (info) {
drm_free(info->rects, info->num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
drm_free(info, sizeof(*info), DRM_MEM_BUFS);
}
}
mutex_lock(&dev->struct_mutex);
drm_drawable_free_all(dev);
del_timer(&dev->timer);
if (dev->unique) {
@ -229,19 +220,17 @@ int drm_lastclose(drm_device_t * dev)
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
drm_agp_mem_t *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
drm_agp_release(dev);
@ -255,20 +244,14 @@ int drm_lastclose(drm_device_t * dev)
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
if (dev->maplist) {
while (!list_empty(&dev->maplist->head)) {
struct list_head *list = dev->maplist->head.next;
r_list = list_entry(list, drm_map_list_t, head);
drm_rmmap_locked(dev, r_list->map);
}
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@ -359,7 +342,7 @@ int drm_init(struct drm_driver *driver,
}
if (!drm_fb_loaded)
pci_register_driver(&driver->pci_driver);
return pci_register_driver(&driver->pci_driver);
else {
for (i = 0; pciidlist[i].vendor != 0; i++) {
pid = &pciidlist[i];
@ -403,13 +386,9 @@ static void drm_cleanup(drm_device_t * dev)
drm_lastclose(dev);
drm_fence_manager_takedown(dev);
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
}
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
if (!drm_fb_loaded)
pci_disable_device(dev->pdev);
@ -625,7 +604,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
ioctl = &drm_ioctls[nr];
else
@ -654,39 +633,11 @@ err_i1:
}
EXPORT_SYMBOL(drm_ioctl);
int drm_wait_on(drm_device_t *dev, wait_queue_head_t *queue, int timeout,
int (*fn)(drm_device_t *dev, void *priv), void *priv)
{
DECLARE_WAITQUEUE(entry, current);
unsigned long end = jiffies + (timeout);
int ret = 0;
add_wait_queue(queue, &entry);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if ((*fn)(dev, priv))
break;
if (time_after_eq(jiffies, end)) {
ret = -EBUSY;
break;
}
schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(queue, &entry);
return ret;
}
EXPORT_SYMBOL(drm_wait_on);
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
drm_map_list_t *entry;
list_for_each_entry(entry, &dev->maplist->head, head) {
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && entry->map->type == _DRM_SHM &&
(entry->map->flags & _DRM_CONTAINS_LOCK)) {
return entry->map;

View File

@ -124,52 +124,76 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
write_unlock_irqrestore(&fm->lock, flags);
}
void drm_fence_usage_deref_locked(drm_device_t * dev,
drm_fence_object_t * fence)
void drm_fence_usage_deref_locked(drm_fence_object_t ** fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
drm_fence_manager_t *fm = &dev->fm;
if (atomic_dec_and_test(&fence->usage)) {
drm_fence_unring(dev, &fence->ring);
DRM_ASSERT_LOCKED(&dev->struct_mutex);
*fence = NULL;
if (atomic_dec_and_test(&tmp_fence->usage)) {
drm_fence_unring(dev, &tmp_fence->ring);
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
fence->base.hash.key);
tmp_fence->base.hash.key);
atomic_dec(&fm->count);
drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
BUG_ON(!list_empty(&tmp_fence->base.list));
drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
}
void drm_fence_usage_deref_unlocked(drm_device_t * dev,
drm_fence_object_t * fence)
void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
drm_fence_manager_t *fm = &dev->fm;
if (atomic_dec_and_test(&fence->usage)) {
*fence = NULL;
if (atomic_dec_and_test(&tmp_fence->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&fence->usage) == 0) {
drm_fence_unring(dev, &fence->ring);
if (atomic_read(&tmp_fence->usage) == 0) {
drm_fence_unring(dev, &tmp_fence->ring);
atomic_dec(&fm->count);
drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
BUG_ON(!list_empty(&tmp_fence->base.list));
drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
mutex_unlock(&dev->struct_mutex);
}
}
static void drm_fence_object_destroy(drm_file_t * priv,
drm_user_object_t * base)
struct drm_fence_object
*drm_fence_reference_locked(struct drm_fence_object *src)
{
DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
atomic_inc(&src->usage);
return src;
}
void drm_fence_reference_unlocked(struct drm_fence_object **dst,
struct drm_fence_object *src)
{
mutex_lock(&src->dev->struct_mutex);
*dst = src;
atomic_inc(&src->usage);
mutex_unlock(&src->dev->struct_mutex);
}
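(Taken together with the reworked deref functions above, fence references are now taken and dropped through a pointer-to-pointer, so the caller's copy is cleared the moment it is released. A hypothetical caller, with src assumed to be a valid fence:)

struct drm_fence_object *fence = NULL;

drm_fence_reference_unlocked(&fence, src);   /* fence now holds a reference to src */
/* ... use fence ... */
drm_fence_usage_deref_unlocked(&fence);      /* drops the reference; fence is NULL again */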
static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base)
{
drm_device_t *dev = priv->head->dev;
drm_fence_object_t *fence =
drm_user_object_entry(base, drm_fence_object_t, base);
drm_fence_usage_deref_locked(dev, fence);
drm_fence_usage_deref_locked(&fence);
}
static int fence_signaled(drm_device_t * dev,
drm_fence_object_t * fence,
uint32_t mask, int poke_flush)
int drm_fence_object_signaled(drm_fence_object_t * fence,
uint32_t mask, int poke_flush)
{
unsigned long flags;
int signaled;
struct drm_device *dev = fence->dev;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
@ -200,16 +224,10 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
}
}
int drm_fence_object_signaled(drm_fence_object_t * fence,
uint32_t type)
{
return ((fence->signaled & type) == type);
}
int drm_fence_object_flush(drm_device_t * dev,
drm_fence_object_t * fence,
int drm_fence_object_flush(drm_fence_object_t * fence,
uint32_t type)
{
struct drm_device *dev = fence->dev;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
@ -272,24 +290,23 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
mutex_unlock(&dev->struct_mutex);
return;
}
fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
atomic_inc(&fence->usage);
fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring));
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
read_unlock_irqrestore(&fm->lock, flags);
if (diff < driver->wrap_diff) {
drm_fence_object_flush(dev, fence, fence->type);
drm_fence_object_flush(fence, fence->type);
}
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
}
EXPORT_SYMBOL(drm_fence_flush_old);
static int drm_fence_lazy_wait(drm_device_t *dev,
drm_fence_object_t *fence,
static int drm_fence_lazy_wait(drm_fence_object_t *fence,
int ignore_signals,
uint32_t mask)
{
struct drm_device *dev = fence->dev;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
int signaled;
@ -298,13 +315,13 @@ static int drm_fence_lazy_wait(drm_device_t *dev,
do {
DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
(signaled = fence_signaled(dev, fence, mask, 1)));
(signaled = drm_fence_object_signaled(fence, mask, 1)));
if (signaled)
return 0;
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (fence_signaled(dev, fence, mask, 0))
if (drm_fence_object_signaled(fence, mask, 0))
return 0;
if (time_after_eq(jiffies, _end))
ret = -EBUSY;
@ -319,10 +336,10 @@ static int drm_fence_lazy_wait(drm_device_t *dev,
return 0;
}
int drm_fence_object_wait(drm_device_t * dev,
drm_fence_object_t * fence,
int drm_fence_object_wait(drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
{
struct drm_device *dev = fence->dev;
drm_fence_driver_t *driver = dev->driver->fence_driver;
int ret = 0;
unsigned long _end;
@ -334,16 +351,16 @@ int drm_fence_object_wait(drm_device_t * dev,
return -EINVAL;
}
if (fence_signaled(dev, fence, mask, 0))
if (drm_fence_object_signaled(fence, mask, 0))
return 0;
_end = jiffies + 3 * DRM_HZ;
drm_fence_object_flush(dev, fence, mask);
drm_fence_object_flush(fence, mask);
if (lazy && driver->lazy_capable) {
ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
if (ret)
return ret;
@ -351,7 +368,7 @@ int drm_fence_object_wait(drm_device_t * dev,
if (driver->has_irq(dev, fence->class,
DRM_FENCE_TYPE_EXE)) {
ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
ret = drm_fence_lazy_wait(fence, ignore_signals,
DRM_FENCE_TYPE_EXE);
if (ret)
return ret;
@ -359,13 +376,13 @@ int drm_fence_object_wait(drm_device_t * dev,
if (driver->has_irq(dev, fence->class,
mask & ~DRM_FENCE_TYPE_EXE)) {
ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
ret = drm_fence_lazy_wait(fence, ignore_signals,
mask);
if (ret)
return ret;
}
}
if (drm_fence_object_signaled(fence, mask))
if (drm_fence_object_signaled(fence, mask, 0))
return 0;
/*
@ -377,7 +394,7 @@ int drm_fence_object_wait(drm_device_t * dev,
#endif
do {
schedule();
signaled = fence_signaled(dev, fence, mask, 1);
signaled = drm_fence_object_signaled(fence, mask, 1);
} while (!signaled && !time_after_eq(jiffies, _end));
if (!signaled)
@ -386,9 +403,10 @@ int drm_fence_object_wait(drm_device_t * dev,
return 0;
}
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
int drm_fence_object_emit(drm_fence_object_t * fence,
uint32_t fence_flags, uint32_t class, uint32_t type)
{
struct drm_device *dev = fence->dev;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
@ -432,15 +450,22 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
/*
* Avoid hitting BUG() for kernel-only fence objects.
*/
INIT_LIST_HEAD(&fence->base.list);
fence->class = class;
fence->type = type;
fence->flush_mask = 0;
fence->submitted_flush = 0;
fence->signaled = 0;
fence->sequence = 0;
fence->dev = dev;
write_unlock_irqrestore(&fm->lock, flags);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
ret = drm_fence_object_emit(dev, fence, fence_flags,
ret = drm_fence_object_emit(fence, fence_flags,
fence->class, type);
}
return ret;
@ -454,15 +479,16 @@ int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(priv, &fence->base, shareable);
mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
goto out;
atomic_inc(&fence->usage);
fence->base.type = drm_fence_type;
fence->base.remove = &drm_fence_object_destroy;
DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
return 0;
out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
@ -472,12 +498,12 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
int ret;
drm_fence_manager_t *fm = &dev->fm;
fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, class, type, flags, fence);
if (ret) {
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
*c_fence = fence;
@ -534,8 +560,7 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
mutex_unlock(&dev->struct_mutex);
return NULL;
}
fence = drm_user_object_entry(uo, drm_fence_object_t, base);
atomic_inc(&fence->usage);
fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base));
mutex_unlock(&dev->struct_mutex);
return fence;
}
@ -556,7 +581,6 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS)
}
DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
if (arg.flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, filp);
ret = drm_fence_object_create(dev, arg.class,
@ -567,7 +591,7 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS)
arg.flags &
DRM_FENCE_FLAG_SHAREABLE);
if (ret) {
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
@ -583,7 +607,7 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS)
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
@ -644,7 +668,7 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS)
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
@ -694,7 +718,7 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS)
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
@ -720,14 +744,14 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_flush(dev, fence, arg.type);
ret = drm_fence_object_flush(fence, arg.type);
read_lock_irqsave(&fm->lock, flags);
arg.class = fence->class;
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
@ -754,7 +778,7 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_wait(dev, fence,
ret = drm_fence_object_wait(fence,
arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
0, arg.type);
@ -763,7 +787,7 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS)
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
@ -791,7 +815,7 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
ret = drm_fence_object_emit(fence, arg.flags, arg.class,
arg.type);
read_lock_irqsave(&fm->lock, flags);
@ -799,7 +823,7 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS)
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
@ -844,7 +868,7 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS)
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(dev, fence);
drm_fence_usage_deref_unlocked(&fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;

View File

@ -79,13 +79,6 @@ static int drm_setup(drm_device_t * dev)
drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&dev->magicfree);
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
if (dev->ctxlist == NULL)
return -ENOMEM;
memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
INIT_LIST_HEAD(&dev->ctxlist->head);
dev->vmalist = NULL;
dev->sigdata.lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
@ -268,6 +261,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->user_objects);
INIT_LIST_HEAD(&priv->refd_objects);
@ -291,19 +285,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
if (!dev->file_last) {
priv->next = NULL;
priv->prev = NULL;
dev->file_first = priv;
dev->file_last = priv;
/* first opener automatically becomes master */
if (list_empty(&dev->filelist))
priv->master = 1;
} else {
priv->next = NULL;
priv->prev = dev->file_last;
dev->file_last->next = priv;
dev->file_last = priv;
}
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->struct_mutex);
#ifdef __alpha__
@ -480,10 +465,10 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->ctxlist_mutex);
if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
if (!list_empty(&dev->ctxlist)) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->tag == priv &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
@ -503,22 +488,12 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->struct_mutex);
drm_object_release(filp);
if (priv->remove_auth_on_close == 1) {
drm_file_t *temp = dev->file_first;
while (temp) {
drm_file_t *temp;
list_for_each_entry(temp, &dev->filelist, lhead)
temp->authenticated = 0;
temp = temp->next;
}
}
if (priv->prev) {
priv->prev->next = priv->next;
} else {
dev->file_first = priv->next;
}
if (priv->next) {
priv->next->prev = priv->prev;
} else {
dev->file_last = priv->prev;
}
list_del(&priv->lhead);
mutex_unlock(&dev->struct_mutex);
if (dev->driver->postclose)

View File

@ -28,7 +28,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm_core.h"

View File

@ -199,7 +199,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
}
i = 0;
list_for_each(list, &dev->maplist->head) {
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, drm_map_list_t, head);
break;
@ -252,12 +252,18 @@ int drm_getclient(struct inode *inode, struct file *filp,
return -EFAULT;
idx = client.idx;
mutex_lock(&dev->struct_mutex);
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
if (!pt) {
if (list_empty(&dev->filelist)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
i = 0;
list_for_each_entry(pt, &dev->filelist, lhead) {
if (i++ >= idx)
break;
}
client.auth = pt->authenticated;
client.pid = pt->pid;
client.uid = pt->uid;

View File

@ -119,8 +119,8 @@ static int drm_irq_install(drm_device_t * dev)
spin_lock_init(&dev->vbl_lock);
INIT_LIST_HEAD(&dev->vbl_sigs.head);
INIT_LIST_HEAD(&dev->vbl_sigs2.head);
INIT_LIST_HEAD(&dev->vbl_sigs);
INIT_LIST_HEAD(&dev->vbl_sigs2);
dev->vbl_pending = 0;
}
@ -290,7 +290,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
? &dev->vbl_sigs2 : &dev->vbl_sigs;
drm_vbl_sig_t *vbl_sig;
@ -300,7 +300,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
list_for_each_entry(vbl_sig, vbl_sigs, head) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current) {
@ -334,7 +334,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);
list_add_tail(&vbl_sig->head, vbl_sigs);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@ -377,20 +377,18 @@ void drm_vbl_send_signals(drm_device_t * dev)
spin_lock_irqsave(&dev->vbl_lock, flags);
for (i = 0; i < 2; i++) {
struct list_head *list, *tmp;
drm_vbl_sig_t *vbl_sig;
drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
drm_vbl_sig_t *vbl_sig, *tmp;
struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
&dev->vbl_received);
list_for_each_safe(list, tmp, &vbl_sigs->head) {
vbl_sig = list_entry(list, drm_vbl_sig_t, head);
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
list_del(list);
list_del(&vbl_sig->head);
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);

View File

@ -228,7 +228,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
offset -= dev->hose->mem_space->start;
#endif
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
list_for_each_entry(agpmem, &dev->agp->memory, head)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))

View File

@ -36,6 +36,8 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
drm_device_t *dev = priv->head->dev;
int ret;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
atomic_set(&item->refcount, 1);
item->shareable = shareable;
item->owner = priv;
@ -56,6 +58,8 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
int ret;
drm_user_object_t *item;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
ret = drm_ht_find_item(&dev->object_hash, key, &hash);
if (ret) {
return NULL;
@ -88,6 +92,8 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
{
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
if (item->owner != priv) {
DRM_ERROR("Cannot destroy object not owned by you.\n");
return -EINVAL;
@ -125,6 +131,7 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
drm_ref_object_t *item;
drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
if (!referenced_object->shareable && priv != referenced_object->owner) {
DRM_ERROR("Not allowed to reference this object\n");
return -EINVAL;
@ -181,6 +188,7 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
drm_hash_item_t *hash;
int ret;
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
(unsigned long)referenced_object, &hash);
if (ret)
@ -213,6 +221,7 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
drm_ref_t unref_action;
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
unref_action = item->unref_action;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(ht, &item->hash);

View File

@ -29,7 +29,7 @@
*/
#ifndef _DRM_OBJECTS_H
#define _DRM_OJBECTS_H
#define _DRM_OBJECTS_H
struct drm_device;
@ -141,6 +141,7 @@ extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
typedef struct drm_fence_object {
drm_user_object_t base;
struct drm_device *dev;
atomic_t usage;
/*
@ -196,15 +197,15 @@ extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
uint32_t sequence);
extern int drm_fence_object_flush(struct drm_device *dev,
drm_fence_object_t * fence, uint32_t type);
extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
extern void drm_fence_usage_deref_locked(struct drm_device *dev,
drm_fence_object_t * fence);
extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
drm_fence_object_t * fence);
extern int drm_fence_object_wait(struct drm_device *dev,
drm_fence_object_t * fence,
extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type);
extern int drm_fence_object_signaled(drm_fence_object_t * fence,
uint32_t type, int flush);
extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence);
extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
struct drm_fence_object *src);
extern int drm_fence_object_wait(drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
uint32_t fence_flags, uint32_t class,
@ -460,7 +461,7 @@ extern int drm_bo_pci_offset(struct drm_device *dev,
unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
@ -492,4 +493,12 @@ extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
uint32_t fence_flags,
drm_bo_mem_reg_t * new_mem);
#ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex) \
BUG_ON(!mutex_is_locked(_mutex) || \
((_mutex)->owner != current_thread_info()))
#else
#define DRM_ASSERT_LOCKED(_mutex)
#endif
#endif
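(DRM_ASSERT_LOCKED only has teeth under CONFIG_DEBUG_MUTEXES, where struct mutex records its owning thread; in other configurations it compiles away. A minimal sketch of its intended use — the helper name is invented:)

static void drm_some_locked_helper(drm_device_t *dev)
{
	DRM_ASSERT_LOCKED(&dev->struct_mutex);	/* caller must hold struct_mutex */
	/* ... manipulate state protected by struct_mutex ... */
}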

View File

@ -51,10 +51,8 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
#if 1
unsigned long addr;
size_t sz;
#endif
#ifdef DRM_DEBUG_MEMORY
int area = DRM_MEM_DMA;

View File

@ -211,7 +211,6 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int len = 0;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
@ -231,9 +230,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
if (dev->maplist != NULL)
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;
@ -242,10 +239,10 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
else
type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags,
(unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
@ -253,7 +250,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
i++;
}
}
if (len > request + offset)
return request;
@ -535,7 +532,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
*eof = 0;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
for (priv = dev->file_first; priv; priv = priv->next) {
list_for_each_entry(priv, &dev->filelist, lhead) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor,
@ -588,7 +585,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
for (pt = dev->vmalist; pt; pt = pt->next) {
list_for_each_entry(pt, &dev->vmalist, head) {
if (!(vma = pt->vma))
continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",

View File

@ -55,6 +55,7 @@ void drm_sg_cleanup(drm_sg_mem_t * entry)
entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
}
EXPORT_SYMBOL(drm_sg_cleanup);
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
@ -62,13 +63,8 @@ void drm_sg_cleanup(drm_sg_mem_t * entry)
# define ScatterHandle(x) (unsigned int)(x)
#endif
int drm_sg_alloc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
drm_scatter_gather_t __user *argp = (void __user *)arg;
drm_scatter_gather_t request;
drm_sg_mem_t *entry;
unsigned long pages, i, j;
@ -80,17 +76,13 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
if (dev->sg)
return -EINVAL;
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
if (!entry)
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages);
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@ -142,12 +134,7 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
SetPageReserved(entry->pagelist[j]);
}
request.handle = entry->handle;
if (copy_to_user(argp, &request, sizeof(request))) {
drm_sg_cleanup(entry);
return -EFAULT;
}
request->handle = entry->handle;
dev->sg = entry;
@ -196,6 +183,32 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
failed:
drm_sg_cleanup(entry);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_sg_alloc);
int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_scatter_gather_t __user *argp = (void __user *)arg;
drm_scatter_gather_t request;
int ret;
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
ret = drm_sg_alloc(priv->head->dev, &request);
if (ret)
	return ret;
if (copy_to_user(argp, &request, sizeof(request))) {
drm_sg_cleanup(priv->head->dev->sg);
return -EFAULT;
}
return 0;
}
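(With the copy_from_user/copy_to_user handling moved into drm_sg_alloc_ioctl(), the exported drm_sg_alloc() can now also be called from inside the kernel; a hypothetical in-kernel caller, where table_bytes is an invented byte-count variable:)

drm_scatter_gather_t request = { .size = table_bytes };
int ret = drm_sg_alloc(dev, &request);
if (ret)
	return ret;
/* request.handle identifies the allocation; dev->sg now points at it */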
int drm_sg_free(struct inode *inode, struct file *filp,

View File

@ -60,6 +60,11 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
{
int retcode;
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
@ -70,6 +75,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
mutex_init(&dev->bm.init_mutex);
mutex_init(&dev->bm.evict_mutex);
idr_init(&dev->drw_idr);
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
@ -80,28 +87,20 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
dev->irq = pdev->irq;
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
return -ENOMEM;
}
if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
return -ENOMEM;
}
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
if (dev->maplist == NULL)
return -ENOMEM;
INIT_LIST_HEAD(&dev->maplist->head);
/* the DRM has 6 counters */
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
@ -233,18 +232,22 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (!drm_fb_loaded) {
pci_set_drvdata(pdev, dev);
pci_request_regions(pdev, driver->pci_driver.name);
ret = pci_request_regions(pdev, driver->pci_driver.name);
if (ret)
goto err_g1;
}
pci_enable_device(pdev);
ret = pci_enable_device(pdev);
if (ret)
goto err_g2;
pci_set_master(pdev);
if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
printk(KERN_ERR "DRM: fill_in_dev failed\n");
goto err_g1;
goto err_g3;
}
if ((ret = drm_get_head(dev, &dev->primary)))
goto err_g1;
goto err_g3;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@ -252,12 +255,16 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
return 0;
err_g1:
if (!drm_fb_loaded) {
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
err_g3:
if (!drm_fb_loaded)
pci_disable_device(pdev);
}
err_g2:
if (!drm_fb_loaded)
pci_release_regions(pdev);
err_g1:
if (!drm_fb_loaded)
pci_set_drvdata(pdev, NULL);
drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
printk(KERN_ERR "DRM: drm_get_dev failed.\n");
return ret;
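(The relabeled error path now checks the return values of pci_request_regions() and pci_enable_device() and unwinds in strict reverse order of setup — the usual goto-ladder shape. A schematic sketch, where do_rest_of_setup() stands in for drm_fill_in_dev() and drm_get_head():)

static int get_dev_sketch(struct pci_dev *pdev, struct drm_driver *driver)
{
	int ret;

	ret = pci_request_regions(pdev, driver->pci_driver.name);
	if (ret)
		goto err_g1;
	ret = pci_enable_device(pdev);
	if (ret)
		goto err_g2;
	ret = do_rest_of_setup(pdev);		/* invented stand-in */
	if (ret)
		goto err_g3;
	return 0;

err_g3:
	pci_disable_device(pdev);		/* undo pci_enable_device() */
err_g2:
	pci_release_regions(pdev);		/* undo pci_request_regions() */
err_g1:
	return ret;
}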

View File

@ -93,11 +93,15 @@ struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name)
retval = class_register(&cs->class);
if (retval)
goto error;
class_create_file(&cs->class, &class_attr_version);
retval = class_create_file(&cs->class, &class_attr_version);
if (retval)
goto error_with_class;
return cs;
error:
error_with_class:
class_unregister(&cs->class);
error:
kfree(cs);
return ERR_PTR(retval);
}
@ -170,16 +174,31 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
if (retval)
goto error;
class_device_create_file(&s_dev->class_dev, &cs->attr);
retval = class_device_create_file(&s_dev->class_dev, &cs->attr);
if (retval)
goto error_with_device;
class_set_devdata(&s_dev->class_dev, head);
for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]);
for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
retval = class_device_create_file(&s_dev->class_dev,
&class_device_attrs[i]);
if (retval)
goto error_with_files;
}
return &s_dev->class_dev;
error:
error_with_files:
while (i > 0)
class_device_remove_file(&s_dev->class_dev,
&class_device_attrs[--i]);
class_device_remove_file(&s_dev->class_dev, &cs->attr);
error_with_device:
class_device_unregister(&s_dev->class_dev);
error:
kfree(s_dev);
return ERR_PTR(retval);
}
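(The new error handling also rolls back a partially completed attribute loop: i counts the files successfully created, so the unwind removes exactly those before falling through to the earlier labels. The shape of the pattern, reduced to its essentials:)

for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
	retval = class_device_create_file(&s_dev->class_dev,
					  &class_device_attrs[i]);
	if (retval)
		goto error_with_files;	/* i files created so far */
}
/* ... */
error_with_files:
	while (i > 0)			/* remove only what was created */
		class_device_remove_file(&s_dev->class_dev,
					 &class_device_attrs[--i]);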

View File

@ -122,7 +122,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
/*
* It's AGP memory - find the real physical page to map
*/
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
list_for_each_entry(agpmem, &dev->agp->memory, head) {
if (agpmem->bound <= baddr &&
agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
break;
@ -205,10 +205,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vma_entry_t *pt, *prev, *next;
drm_vma_entry_t *pt, *temp;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
int found_maps = 0;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@ -218,19 +217,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
map = vma->vm_private_data;
mutex_lock(&dev->struct_mutex);
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma->vm_private_data == map)
found_maps++;
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
} else {
prev = pt;
}
}
/* We were the only map that was found */
@ -239,9 +231,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
* we delete this mappings information.
*/
found_maps = 0;
list = &dev->maplist->head;
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map == map)
found_maps++;
}
@ -439,9 +429,8 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
vma_entry->vma = vma;
vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
list_add(&vma_entry->head, &dev->vmalist);
}
}
@ -467,20 +456,16 @@ static void drm_vm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vma_entry_t *pt, *prev;
drm_vma_entry_t *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_dec(&dev->vma_count);
mutex_lock(&dev->struct_mutex);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
@ -855,7 +840,8 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
#ifdef DRM_ODD_MM_COMPAT
drm_bo_delete_vma(bo, vma);
#endif
drm_bo_usage_deref_locked(bo);
drm_bo_usage_deref_locked((struct drm_buffer_object **)
&vma->vm_private_data);
mutex_unlock(&dev->struct_mutex);
}
return;

View File

@ -346,12 +346,10 @@ static int i810_dma_initialize(drm_device_t * dev,
drm_i810_private_t * dev_priv,
drm_i810_init_t * init)
{
struct list_head *list;
drm_map_list_t *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK) {

View File

@ -41,9 +41,9 @@ static struct pci_device_id pciidlist[] = {
#ifdef I915_HAVE_FENCE
static drm_fence_driver_t i915_fence_driver = {
.num_classes = 1,
.wrap_diff = (1 << 30),
.flush_diff = (1 << 29),
.sequence_mask = 0xffffffffU,
.wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
.flush_diff = (1U << (BREADCRUMB_BITS - 2)),
.sequence_mask = BREADCRUMB_MASK,
.lazy_capable = 1,
.emit = i915_fence_emit_sequence,
.poke_flush = i915_poke_flush,

View File

@ -61,7 +61,7 @@ static void i915_perform_flush(drm_device_t * dev)
* First update fences with the current breadcrumb.
*/
diff = sequence - fc->last_exe_flush;
diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
if (diff < driver->wrap_diff && diff != 0) {
drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
}
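(Masking the subtraction is what keeps the ordering test correct once the breadcrumb counter wraps: the difference is computed modulo 2^31 rather than over the full 32-bit space. A worked example, assuming the BREADCRUMB_BITS value of 31 defined later in this commit:)

uint32_t last = 0x7ffffffe;	/* last_exe_flush, near the top of the 31-bit range */
uint32_t seq  = 0x00000002;	/* breadcrumb after wrapping back to 1, 2, ... */
uint32_t diff = (seq - last) & BREADCRUMB_MASK;	/* (0x80000004 & 0x7fffffff) == 4 */
/* diff (4) < wrap_diff (1 << 30), so seq is correctly treated as newer */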

View File

@ -30,7 +30,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"

View File

@ -32,7 +32,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"

View File

@ -32,7 +32,6 @@
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"

View File

@ -0,0 +1 @@
../shared-core/nouveau_notifier.c

linux-core/nv04_fifo.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/nv04_fifo.c

linux-core/nv04_instmem.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/nv04_instmem.c

linux-core/nv10_fifo.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/nv10_fifo.c

linux-core/nv40_fifo.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/nv40_fifo.c

linux-core/nv50_fifo.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/nv50_fifo.c

linux-core/nv50_graph.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/nv50_graph.c

linux-core/nv50_instmem.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/nv50_instmem.c

linux-core/nv50_mc.c Symbolic link
View File

@ -0,0 +1 @@
../shared-core/nv50_mc.c

View File

@ -30,7 +30,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"

View File

@ -61,7 +61,7 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
.firstopen = radeon_driver_firstopen,
@ -71,6 +71,7 @@ static struct drm_driver driver = {
.lastclose = radeon_driver_lastclose,
.unload = radeon_driver_unload,
.vblank_wait = radeon_driver_vblank_wait,
.vblank_wait2 = radeon_driver_vblank_wait2,
.dri_library_name = dri_library_name,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,

View File

@ -28,7 +28,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
@ -350,6 +349,36 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request);
}
/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
typedef struct drm_radeon_setparam32 {
int param;
u64 value;
} __attribute__((packed)) drm_radeon_setparam32_t;
static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_radeon_setparam32_t req32;
drm_radeon_setparam_t __user *request;
if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.param, &request->param)
|| __put_user((void __user *)(unsigned long)req32.value,
&request->value))
return -EFAULT;
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
}
#else
#define compat_radeon_cp_setparam NULL
#endif /* X86_64 || IA64 */
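(The packed mirror exists because 32-bit x86 aligns u64 to 4 bytes while x86_64 aligns it to 8, so the same declaration produces two different layouts across the compat boundary; on x86 the sizes work out as follows — a sketch:)

struct setparam_64 { int param; u64 value; };	/* 64-bit build: 16 bytes, value at offset 8 */
struct setparam_32 { int param; u64 value; } __attribute__((packed));
						/* what 32-bit userspace passed: 12 bytes, value at offset 4 */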
drm_ioctl_compat_t *radeon_compat_ioctls[] = {
[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
@ -358,6 +387,7 @@ drm_ioctl_compat_t *radeon_compat_ioctls[] = {
[DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
[DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
[DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
[DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
[DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
[DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
};

View File

@ -233,7 +233,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev)
drm_map_list_t *entry;
drm_local_map_t *map;
list_for_each_entry(entry, &dev->maplist->head, head) {
list_for_each_entry(entry, &dev->maplist, head) {
map = entry->map;
if (!map)
continue;

View File

@ -98,13 +98,20 @@
0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26"
0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP"
0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP"
0x1002 0x5954 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI RS480 XPRESS 200G"
0x1002 0x5955 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon XPRESS 200M 5955"
0x1002 0x5974 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS482 XPRESS 200"
0x1002 0x5975 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS485 XPRESS 1100 IGP"
0x1002 0x5960 CHIP_RV280 "ATI Radeon RV280 9250"
0x1002 0x5961 CHIP_RV280 "ATI Radeon RV280 9200"
0x1002 0x5962 CHIP_RV280 "ATI Radeon RV280 9200"
0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE"
0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI"
0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50"
0x1002 0x5a41 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS400 XPRESS 200"
0x1002 0x5a42 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RS400 XPRESS 200M"
0x1002 0x5a61 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200"
0x1002 0x5a62 CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART "ATI Radeon RC410 XPRESS 200M"
0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE"
0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro"
0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550"
@ -204,6 +211,8 @@
0x1039 0x6300 0 "SiS 630"
0x1039 0x6330 SIS_CHIP_315 "SiS 661"
0x1039 0x7300 0 "SiS 730"
0x18CA 0x0040 SIS_CHIP_315 "Volari V3XT/V5/V8"
0x18CA 0x0042 SIS_CHIP_315 "Volari Unknown"
[tdfx]
0x121a 0x0003 0 "3dfx Voodoo Banshee"
@ -275,11 +284,16 @@
0x8086 0x2592 CHIP_I9XX|CHIP_I915 "Intel i915GM"
0x8086 0x2772 CHIP_I9XX|CHIP_I915 "Intel i945G"
0x8086 0x27A2 CHIP_I9XX|CHIP_I915 "Intel i945GM"
0x8086 0x27AE CHIP_I9XX|CHIP_I915 "Intel i945GME"
0x8086 0x2972 CHIP_I9XX|CHIP_I965 "Intel i946GZ"
0x8086 0x2982 CHIP_I9XX|CHIP_I965 "Intel i965G"
0x8086 0x2992 CHIP_I9XX|CHIP_I965 "Intel i965Q"
0x8086 0x29A2 CHIP_I9XX|CHIP_I965 "Intel i965G"
0x8086 0x2A02 CHIP_I9XX|CHIP_I965 "Intel i965GM"
0x8086 0x2A12 CHIP_I9XX|CHIP_I965 "Intel i965GME/GLE"
0x8086 0x29C2 CHIP_I9XX|CHIP_I915 "Intel G33"
0x8086 0x29B2 CHIP_I9XX|CHIP_I915 "Intel Q35"
0x8086 0x29D2 CHIP_I9XX|CHIP_I915 "Intel Q33"
[imagine]
0x105d 0x2309 IMAGINE_128 "Imagine 128"
@ -582,6 +596,9 @@
0x10de 0x018d NV_17 "GeForce4 448 Go"
0x10de 0x0191 NV_50 "GeForce 8800 GTX"
0x10de 0x0193 NV_50 "GeForce 8800 GTS"
0x10de 0x0194 NV_50 "GeForce 8800 Ultra"
0x10de 0x019d NV_50 "Quadro FX 5600"
0x10de 0x019e NV_50 "Quadro FX 4600"
0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics"
0x10de 0x01d1 NV_44 "GeForce 7300 LE"
0x10de 0x01d6 NV_44 "GeForce Go 7200"
@ -688,6 +705,11 @@
0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405"
0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400"
0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420"
0x10de 0x0400 NV_50 "GeForce 8600 GTS"
0x10de 0x0402 NV_50 "GeForce 8600 GT"
0x10de 0x0421 NV_50 "GeForce 8500 GT"
0x10de 0x0422 NV_50 "GeForce 8400 GS"
0x10de 0x0423 NV_50 "GeForce 8300 GS"
0x12d2 0x0008 NV_03 "NV1"
0x12d2 0x0009 NV_03 "DAC64"
0x12d2 0x0018 NV_03 "Riva128"

View File

@ -35,8 +35,12 @@
dev->pci_device == 0x2982 || \
dev->pci_device == 0x2992 || \
dev->pci_device == 0x29A2 || \
dev->pci_device == 0x2A02)
dev->pci_device == 0x2A02 || \
dev->pci_device == 0x2A12)
#define IS_G33(dev) (dev->pci_device == 0x29C2 || \
dev->pci_device == 0x29B2 || \
dev->pci_device == 0x29D2)
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
@ -107,7 +111,11 @@ static int i915_dma_cleanup(drm_device_t * dev)
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x02080, 0x1ffff000);
}
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
@ -177,23 +185,28 @@ static int i915_initialize(drm_device_t * dev,
*/
dev_priv->allow_batchbuffer = 1;
/* Enable vblank on pipe A for older X servers
*/
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
/* Program Hardware Status Page */
dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
0xffffffff);
if (!IS_G33(dev)) {
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return DRM_ERR(ENOMEM);
if (!dev_priv->status_page_dmah) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return DRM_ERR(ENOMEM);
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->dma_status_page);
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
I915_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
dev->dev_private = (void *)dev_priv;
return 0;
@ -228,7 +241,10 @@ static int i915_dma_resume(drm_device_t * dev)
}
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
I915_WRITE(0x02080, dev_priv->dma_status_page);
if (dev_priv->status_gfx_addr != 0)
I915_WRITE(0x02080, dev_priv->status_gfx_addr);
else
I915_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
return 0;
@ -429,10 +445,12 @@ void i915_emit_breadcrumb(drm_device_t *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
if (++dev_priv->counter > BREADCRUMB_MASK) {
dev_priv->counter = 1;
DRM_DEBUG("Breadcrumb counter wrapped around\n");
}
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
@ -467,7 +485,9 @@ int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
drm_i915_cmdbuffer_t * cmd)
{
#ifdef I915_HAVE_FENCE
drm_i915_private_t *dev_priv = dev->dev_private;
#endif
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
@ -852,7 +872,7 @@ static int i915_mmio(DRM_IOCTL_ARGS)
return DRM_ERR(EINVAL);
e = &mmio_table[mmio.reg];
base = dev_priv->mmio_map->handle + e->offset;
base = (u8 *) dev_priv->mmio_map->handle + e->offset;
switch (mmio.read_write) {
case I915_MMIO_READ:
@ -878,6 +898,47 @@ static int i915_mmio(DRM_IOCTL_ARGS)
return 0;
}
static int i915_set_status_page(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t hws;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data,
sizeof(hws));
printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr);
dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr;
dev_priv->hws_map.size = 4*1024;
dev_priv->hws_map.type = 0;
dev_priv->hws_map.flags = 0;
dev_priv->hws_map.mtrr = 0;
drm_core_ioremap(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
dev_priv->status_gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return DRM_ERR(ENOMEM);
}
dev_priv->hw_status_page = dev_priv->hws_map.handle;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->status_gfx_addr);
DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
dev_priv->status_gfx_addr);
DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
return 0;
}
int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
/* i915 has 4 more counters */
@ -926,6 +987,7 @@ drm_ioctl_desc_t i915_ioctls[] = {
[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH },
[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
[DRM_IOCTL_NR(DRM_I915_MMIO)] = {i915_mmio, DRM_AUTH},
[DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH},
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

View File

@ -159,6 +159,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@ -314,4 +315,8 @@ typedef struct drm_i915_mmio {
void __user *data;
} drm_i915_mmio_t;
typedef struct drm_i915_hws_addr {
uint64_t addr;
} drm_i915_hws_addr_t;
#endif /* _I915_DRM_H_ */
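(A hypothetical userspace caller of the new request, assuming an ioctl wrapper in the usual DRM_IOCTL_I915_* style — which this hunk does not show — and an invented hws_offset:)

drm_i915_hws_addr_t hws = { .addr = hws_offset };	/* offset for the status page */
ret = ioctl(fd, DRM_IOCTL_I915_HWS_ADDR, &hws);		/* wrapper name assumed */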

View File

@ -99,6 +99,8 @@ typedef struct drm_i915_private {
void *hw_status_page;
dma_addr_t dma_status_page;
uint32_t counter;
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
unsigned int cpp;
int use_mi_batchbuffer_start;
@ -361,6 +363,9 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define BREADCRUMB_BITS 31
#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
#endif

View File

@ -381,13 +381,6 @@ void i915_user_irq_off(drm_i915_private_t *dev_priv)
spin_unlock(&dev_priv->user_irq_lock);
}
static int wait_compare(struct drm_device *dev, void *priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int irq_nr = (u64)priv;
return (READ_BREADCRUMB(dev_priv) >= irq_nr);
}
static int i915_wait_irq(drm_device_t * dev, int irq_nr)
{
@ -403,8 +396,8 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
i915_user_irq_on(dev_priv);
ret = drm_wait_on(dev, &dev_priv->irq_queue, 3 * DRM_HZ, wait_compare,
(void *)(u64)irq_nr);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
i915_user_irq_off(dev_priv);
if (ret == DRM_ERR(EBUSY)) {
@ -722,22 +715,13 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
if (!dev_priv->vblank_pipe)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED;
spin_lock_init(&dev_priv->user_irq_lock);
dev_priv->user_irq_refcount = 0;
if (!dev_priv->vblank_pipe)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
i915_enable_interrupt(dev);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

View File

@ -551,7 +551,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
drm_map_list_t *_entry;
unsigned long agp_token = 0;
list_for_each_entry(_entry, &dev->maplist->head, head) {
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == dev->agp_buffer_map)
agp_token = _entry->user_token;
}

View File

@ -25,9 +25,12 @@
#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__
#define NOUVEAU_DRM_HEADER_PATCHLEVEL 6
#define NOUVEAU_DRM_HEADER_PATCHLEVEL 8
typedef struct drm_nouveau_fifo_alloc {
uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle;
int channel;
uint32_t put_base;
/* FIFO control regs */
@ -36,38 +39,42 @@ typedef struct drm_nouveau_fifo_alloc {
/* DMA command buffer */
drm_handle_t cmdbuf;
int cmdbuf_size;
/* Notifier memory */
drm_handle_t notifier;
int notifier_size;
}
drm_nouveau_fifo_alloc_t;
typedef struct drm_nouveau_object_init {
typedef struct drm_nouveau_grobj_alloc {
int channel;
uint32_t handle;
int class;
}
drm_nouveau_object_init_t;
drm_nouveau_grobj_alloc_t;
#define NOUVEAU_MEM_ACCESS_RO 1
#define NOUVEAU_MEM_ACCESS_WO 2
#define NOUVEAU_MEM_ACCESS_RW 3
typedef struct drm_nouveau_dma_object_init {
typedef struct drm_nouveau_notifier_alloc {
int channel;
uint32_t handle;
int class;
int access;
int target;
int count;
uint32_t offset;
int size;
}
drm_nouveau_dma_object_init_t;
drm_nouveau_notifier_alloc_t;
#define NOUVEAU_MEM_FB 0x00000001
#define NOUVEAU_MEM_AGP 0x00000002
#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004
#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008
#define NOUVEAU_MEM_PINNED 0x00000010
#define NOUVEAU_MEM_USER_BACKED 0x00000020
#define NOUVEAU_MEM_MAPPED 0x00000040
#define NOUVEAU_MEM_INSTANCE 0x00000080 /* internal */
#define NOUVEAU_MEM_PCI 0x00000010
#define NOUVEAU_MEM_PCI_ACCEPTABLE 0x00000020
#define NOUVEAU_MEM_PINNED 0x00000040
#define NOUVEAU_MEM_USER_BACKED 0x00000080
#define NOUVEAU_MEM_MAPPED 0x00000100
#define NOUVEAU_MEM_INSTANCE 0x00000200 /* internal */
#define NOUVEAU_MEM_NOTIFIER 0x00000400 /* internal */
typedef struct drm_nouveau_mem_alloc {
int flags;
@ -91,6 +98,7 @@ drm_nouveau_mem_free_t;
#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
typedef struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;
@ -114,7 +122,7 @@ enum nouveau_card_type {
NV_10 =10,
NV_11 =10,
NV_15 =10,
NV_17 =10,
NV_17 =17,
NV_20 =20,
NV_25 =20,
NV_30 =30,
@ -141,8 +149,8 @@ typedef struct drm_nouveau_sarea {
drm_nouveau_sarea_t;
#define DRM_NOUVEAU_FIFO_ALLOC 0x00
#define DRM_NOUVEAU_OBJECT_INIT 0x01
#define DRM_NOUVEAU_DMA_OBJECT_INIT 0x02
#define DRM_NOUVEAU_GROBJ_ALLOC 0x01
#define DRM_NOUVEAU_NOTIFIER_ALLOC 0x02
#define DRM_NOUVEAU_MEM_ALLOC 0x03
#define DRM_NOUVEAU_MEM_FREE 0x04
#define DRM_NOUVEAU_GETPARAM 0x05

View File

@ -34,7 +34,7 @@
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 6
#define DRIVER_PATCHLEVEL 8
#define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000
@ -57,39 +57,71 @@ enum nouveau_flags {
NV_NFORCE2 =0x20000000
};
struct nouveau_object
{
struct nouveau_object *next;
struct nouveau_object *prev;
#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
#define NVOBJ_ENGINE_INT 0xdeadbeef
#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
#define NVOBJ_FLAG_FAKE (1 << 3)
typedef struct nouveau_gpuobj {
struct nouveau_gpuobj *next;
struct nouveau_gpuobj *prev;
int im_channel;
struct mem_block *im_pramin;
struct mem_block *im_backing;
int im_bound;
uint32_t flags;
int refcount;
uint32_t engine;
uint32_t class;
} nouveau_gpuobj_t;
typedef struct nouveau_gpuobj_ref {
struct nouveau_gpuobj_ref *next;
nouveau_gpuobj_t *gpuobj;
uint32_t instance;
int channel;
struct mem_block *instance;
uint32_t ht_loc;
uint32_t handle;
int class;
int engine;
};
int handle;
} nouveau_gpuobj_ref_t;
struct nouveau_fifo
{
int used;
/* owner of this fifo */
DRMFILE filp;
/* mapping of the fifo itself */
drm_local_map_t *map;
/* mapping of the regs controling the fifo */
drm_local_map_t *regs;
/* dma object for the command buffer itself */
struct mem_block *cmdbuf_mem;
struct nouveau_object *cmdbuf_obj;
/* PGRAPH context, for cards that keep it in RAMIN */
struct mem_block *ramin_grctx;
/* objects belonging to this fifo */
struct nouveau_object *objs;
/* XXX dynamic alloc ? */
uint32_t pgraph_ctx [340];
/* DMA push buffer */
nouveau_gpuobj_ref_t *pushbuf;
struct mem_block *pushbuf_mem;
uint32_t pushbuf_base;
/* Notifier memory */
struct mem_block *notifier_block;
struct mem_block *notifier_heap;
drm_local_map_t *notifier_map;
/* PFIFO context */
nouveau_gpuobj_ref_t *ramfc;
/* PGRAPH context */
nouveau_gpuobj_ref_t *ramin_grctx;
uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? */
/* Objects */
nouveau_gpuobj_ref_t *ramin; /* Private instmem */
struct mem_block *ramin_heap; /* Private PRAMIN heap */
nouveau_gpuobj_ref_t *ramht; /* Hash table */
nouveau_gpuobj_ref_t *ramht_refs; /* Objects referenced by RAMHT */
};
struct nouveau_config {
@ -99,34 +131,65 @@ struct nouveau_config {
} cmdbuf;
};
struct nouveau_engine_func {
typedef struct nouveau_engine_func {
struct {
int (*Init)(drm_device_t *dev);
void (*Takedown)(drm_device_t *dev);
} Mc;
void *priv;
int (*init)(drm_device_t *dev);
void (*takedown)(drm_device_t *dev);
int (*populate)(drm_device_t *, nouveau_gpuobj_t *,
uint32_t *size);
void (*clear)(drm_device_t *, nouveau_gpuobj_t *);
int (*bind)(drm_device_t *, nouveau_gpuobj_t *);
int (*unbind)(drm_device_t *, nouveau_gpuobj_t *);
} instmem;
struct {
int (*Init)(drm_device_t *dev);
void (*Takedown)(drm_device_t *dev);
} Timer;
int (*init)(drm_device_t *dev);
void (*takedown)(drm_device_t *dev);
} mc;
struct {
int (*Init)(drm_device_t *dev);
void (*Takedown)(drm_device_t *dev);
} Fb;
int (*init)(drm_device_t *dev);
void (*takedown)(drm_device_t *dev);
} timer;
struct {
int (*Init)(drm_device_t *dev);
void (*Takedown)(drm_device_t *dev);
} Graph;
int (*init)(drm_device_t *dev);
void (*takedown)(drm_device_t *dev);
} fb;
struct {
int (*Init)(drm_device_t *dev);
void (*Takedown)(drm_device_t *dev);
} Fifo;
};
int (*init)(drm_device_t *);
void (*takedown)(drm_device_t *);
int (*create_context)(drm_device_t *, int channel);
void (*destroy_context)(drm_device_t *, int channel);
int (*load_context)(drm_device_t *, int channel);
int (*save_context)(drm_device_t *, int channel);
} graph;
struct {
void *priv;
int (*init)(drm_device_t *);
void (*takedown)(drm_device_t *);
int (*create_context)(drm_device_t *, int channel);
void (*destroy_context)(drm_device_t *, int channel);
int (*load_context)(drm_device_t *, int channel);
int (*save_context)(drm_device_t *, int channel);
} fifo;
} nouveau_engine_func_t;
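/* Per-chipset hook table, filled in by nouveau_init_engine_ptrs()
* (nouveau_state.c) from dev_priv->chipset. Common code dispatches
* through it instead of switching on card type, e.g.:
*
*	nouveau_engine_func_t *engine = &dev_priv->Engine;
*	ret = engine->graph.create_context(dev, channel);
*
* (illustrative; this matches the call sites in nouveau_fifo.c)
*/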
typedef struct drm_nouveau_private {
enum {
NOUVEAU_CARD_INIT_DOWN,
NOUVEAU_CARD_INIT_DONE,
NOUVEAU_CARD_INIT_FAILED
} init_state;
/* the card type, takes NV_* as values */
int card_type;
/* exact chipset, derived from NV_PMC_BOOT_0 */
@ -138,12 +201,13 @@ typedef struct drm_nouveau_private {
drm_local_map_t *ramin; /* NV40 onwards */
int fifo_alloc_count;
struct nouveau_fifo fifos[NV_MAX_FIFO_NUMBER];
struct nouveau_fifo *fifos[NV_MAX_FIFO_NUMBER];
struct nouveau_engine_func Engine;
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
uint32_t ramin_size;
nouveau_gpuobj_t *ramht;
uint32_t ramin_rsvd_vram;
uint32_t ramht_offset;
uint32_t ramht_size;
uint32_t ramht_bits;
@ -165,12 +229,15 @@ typedef struct drm_nouveau_private {
struct mem_block *fb_heap;
struct mem_block *fb_nomap_heap;
struct mem_block *ramin_heap;
struct mem_block *pci_heap;
/* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
uint32_t ctx_table_size;
struct mem_block *ctx_table;
nouveau_gpuobj_ref_t *ctx_table;
struct nouveau_config config;
nouveau_gpuobj_t *gpuobj_all;
}
drm_nouveau_private_t;
@ -186,6 +253,13 @@ extern void nouveau_wait_for_idle(struct drm_device *dev);
extern int nouveau_ioctl_card_init(DRM_IOCTL_ARGS);
/* nouveau_mem.c */
extern int nouveau_mem_init_heap(struct mem_block **,
uint64_t start, uint64_t size);
extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
uint64_t size, int align2,
DRMFILE);
extern void nouveau_mem_takedown(struct mem_block **heap);
extern void nouveau_mem_free_block(struct mem_block *);
extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev);
extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap);
extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS);
@ -194,16 +268,13 @@ extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment
extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*);
extern int nouveau_mem_init(struct drm_device *dev);
extern void nouveau_mem_close(struct drm_device *dev);
extern int nouveau_instmem_init(struct drm_device *dev);
extern struct mem_block* nouveau_instmem_alloc(struct drm_device *dev,
uint32_t size, uint32_t align);
extern void nouveau_instmem_free(struct drm_device *dev,
struct mem_block *block);
extern uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv,
struct mem_block *mem, int index);
extern void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv,
struct mem_block *mem, int index,
uint32_t val);
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(drm_device_t *, int channel, DRMFILE);
extern void nouveau_notifier_takedown_channel(drm_device_t *, int channel);
extern int nouveau_notifier_alloc(drm_device_t *, int channel,
uint32_t handle, int count, uint32_t *offset);
extern int nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS);
/* nouveau_fifo.c */
extern int nouveau_fifo_init(drm_device_t *dev);
@ -214,17 +285,28 @@ extern int nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel);
extern void nouveau_fifo_free(drm_device_t *dev, int channel);
/* nouveau_object.c */
extern void nouveau_object_cleanup(drm_device_t *dev, int channel);
extern struct nouveau_object *
nouveau_object_gr_create(drm_device_t *dev, int channel, int class);
extern struct nouveau_object *
nouveau_object_dma_create(drm_device_t *dev, int channel, int class,
uint32_t offset, uint32_t size,
int access, int target);
extern void nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj);
extern int nouveau_ioctl_object_init(DRM_IOCTL_ARGS);
extern int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS);
extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem);
extern void nouveau_gpuobj_takedown(drm_device_t *dev);
extern int nouveau_gpuobj_channel_init(drm_device_t *, int channel,
uint32_t vram_h, uint32_t tt_h);
extern void nouveau_gpuobj_channel_takedown(drm_device_t *, int channel);
extern int nouveau_gpuobj_new(drm_device_t *, int channel, int size, int align,
uint32_t flags, nouveau_gpuobj_t **);
extern int nouveau_gpuobj_del(drm_device_t *, nouveau_gpuobj_t **);
extern int nouveau_gpuobj_ref_add(drm_device_t *, int channel, uint32_t handle,
nouveau_gpuobj_t *, nouveau_gpuobj_ref_t **);
extern int nouveau_gpuobj_ref_del(drm_device_t *, nouveau_gpuobj_ref_t **);
extern int nouveau_gpuobj_new_ref(drm_device_t *, int chan_obj, int chan_ref,
uint32_t handle, int size, int align,
uint32_t flags, nouveau_gpuobj_ref_t **);
extern int nouveau_gpuobj_new_fake(drm_device_t *, uint32_t offset,
uint32_t size, uint32_t flags,
nouveau_gpuobj_t**, nouveau_gpuobj_ref_t**);
extern int nouveau_gpuobj_dma_new(drm_device_t *, int channel, int class,
uint64_t offset, uint64_t size,
int access, int target, nouveau_gpuobj_t **);
extern int nouveau_gpuobj_gr_new(drm_device_t *, int channel, int class,
nouveau_gpuobj_t **);
extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS);
/* nouveau_irq.c */
extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
@ -244,35 +326,100 @@ extern void nv10_fb_takedown(drm_device_t *dev);
extern int nv40_fb_init(drm_device_t *dev);
extern void nv40_fb_takedown(drm_device_t *dev);
/* nv04_fifo.c */
extern int nv04_fifo_create_context(drm_device_t *dev, int channel);
extern void nv04_fifo_destroy_context(drm_device_t *dev, int channel);
extern int nv04_fifo_load_context(drm_device_t *dev, int channel);
extern int nv04_fifo_save_context(drm_device_t *dev, int channel);
/* nv10_fifo.c */
extern int nv10_fifo_create_context(drm_device_t *dev, int channel);
extern void nv10_fifo_destroy_context(drm_device_t *dev, int channel);
extern int nv10_fifo_load_context(drm_device_t *dev, int channel);
extern int nv10_fifo_save_context(drm_device_t *dev, int channel);
/* nv40_fifo.c */
extern int nv40_fifo_create_context(drm_device_t *, int channel);
extern void nv40_fifo_destroy_context(drm_device_t *, int channel);
extern int nv40_fifo_load_context(drm_device_t *, int channel);
extern int nv40_fifo_save_context(drm_device_t *, int channel);
/* nv50_fifo.c */
extern int nv50_fifo_init(drm_device_t *);
extern void nv50_fifo_takedown(drm_device_t *);
extern int nv50_fifo_create_context(drm_device_t *, int channel);
extern void nv50_fifo_destroy_context(drm_device_t *, int channel);
extern int nv50_fifo_load_context(drm_device_t *, int channel);
extern int nv50_fifo_save_context(drm_device_t *, int channel);
/* nv04_graph.c */
extern void nouveau_nv04_context_switch(drm_device_t *dev);
extern int nv04_graph_init(drm_device_t *dev);
extern void nv04_graph_takedown(drm_device_t *dev);
extern int nv04_graph_context_create(drm_device_t *dev, int channel);
extern int nv04_graph_create_context(drm_device_t *dev, int channel);
extern void nv04_graph_destroy_context(drm_device_t *dev, int channel);
extern int nv04_graph_load_context(drm_device_t *dev, int channel);
extern int nv04_graph_save_context(drm_device_t *dev, int channel);
/* nv10_graph.c */
extern void nouveau_nv10_context_switch(drm_device_t *dev);
extern int nv10_graph_init(drm_device_t *dev);
extern void nv10_graph_takedown(drm_device_t *dev);
extern int nv10_graph_context_create(drm_device_t *dev, int channel);
extern int nv10_graph_create_context(drm_device_t *dev, int channel);
extern void nv10_graph_destroy_context(drm_device_t *dev, int channel);
extern int nv10_graph_load_context(drm_device_t *dev, int channel);
extern int nv10_graph_save_context(drm_device_t *dev, int channel);
/* nv20_graph.c */
extern void nouveau_nv20_context_switch(drm_device_t *dev);
extern int nv20_graph_init(drm_device_t *dev);
extern void nv20_graph_takedown(drm_device_t *dev);
extern int nv20_graph_context_create(drm_device_t *dev, int channel);
extern int nv20_graph_create_context(drm_device_t *dev, int channel);
extern void nv20_graph_destroy_context(drm_device_t *dev, int channel);
extern int nv20_graph_load_context(drm_device_t *dev, int channel);
extern int nv20_graph_save_context(drm_device_t *dev, int channel);
/* nv30_graph.c */
extern int nv30_graph_init(drm_device_t *dev);
extern void nv30_graph_takedown(drm_device_t *dev);
extern int nv30_graph_context_create(drm_device_t *dev, int channel);
extern int nv30_graph_create_context(drm_device_t *, int channel);
extern void nv30_graph_destroy_context(drm_device_t *, int channel);
extern int nv30_graph_load_context(drm_device_t *, int channel);
extern int nv30_graph_save_context(drm_device_t *, int channel);
/* nv40_graph.c */
extern int nv40_graph_init(drm_device_t *dev);
extern void nv40_graph_takedown(drm_device_t *dev);
extern int nv40_graph_context_create(drm_device_t *dev, int channel);
extern void nv40_graph_context_save_current(drm_device_t *dev);
extern void nv40_graph_context_restore(drm_device_t *dev, int channel);
extern int nv40_graph_init(drm_device_t *);
extern void nv40_graph_takedown(drm_device_t *);
extern int nv40_graph_create_context(drm_device_t *, int channel);
extern void nv40_graph_destroy_context(drm_device_t *, int channel);
extern int nv40_graph_load_context(drm_device_t *, int channel);
extern int nv40_graph_save_context(drm_device_t *, int channel);
/* nv50_graph.c */
extern int nv50_graph_init(drm_device_t *);
extern void nv50_graph_takedown(drm_device_t *);
extern int nv50_graph_create_context(drm_device_t *, int channel);
extern void nv50_graph_destroy_context(drm_device_t *, int channel);
extern int nv50_graph_load_context(drm_device_t *, int channel);
extern int nv50_graph_save_context(drm_device_t *, int channel);
/* nv04_instmem.c */
extern int nv04_instmem_init(drm_device_t *dev);
extern void nv04_instmem_takedown(drm_device_t *dev);
extern int nv04_instmem_populate(drm_device_t*, nouveau_gpuobj_t*,
uint32_t *size);
extern void nv04_instmem_clear(drm_device_t*, nouveau_gpuobj_t*);
extern int nv04_instmem_bind(drm_device_t*, nouveau_gpuobj_t*);
extern int nv04_instmem_unbind(drm_device_t*, nouveau_gpuobj_t*);
/* nv50_instmem.c */
extern int nv50_instmem_init(drm_device_t *dev);
extern void nv50_instmem_takedown(drm_device_t *dev);
extern int nv50_instmem_populate(drm_device_t*, nouveau_gpuobj_t*,
uint32_t *size);
extern void nv50_instmem_clear(drm_device_t*, nouveau_gpuobj_t*);
extern int nv50_instmem_bind(drm_device_t*, nouveau_gpuobj_t*);
extern int nv50_instmem_unbind(drm_device_t*, nouveau_gpuobj_t*);
/* nv04_mc.c */
extern int nv04_mc_init(drm_device_t *dev);
@ -282,6 +429,10 @@ extern void nv04_mc_takedown(drm_device_t *dev);
extern int nv40_mc_init(drm_device_t *dev);
extern void nv40_mc_takedown(drm_device_t *dev);
/* nv50_mc.c */
extern int nv50_mc_init(drm_device_t *dev);
extern void nv50_mc_takedown(drm_device_t *dev);
/* nv04_timer.c */
extern int nv04_timer_init(drm_device_t *dev);
extern void nv04_timer_takedown(drm_device_t *dev);
@ -297,8 +448,17 @@ extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
#define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#endif
#define INSTANCE_WR(mem,ofs,val) nouveau_instmem_w32(dev_priv,(mem),(ofs),(val))
#define INSTANCE_RD(mem,ofs) nouveau_instmem_r32(dev_priv,(mem),(ofs))
/* PRAMIN access */
#if defined(__powerpc__)
#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o))
#define NV_WI32(o,v) out_be32((void __iomem*)(dev_priv->ramin)->handle+(o), (v))
#else
#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o))
#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v))
#endif
#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2))
#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v))
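/* INSTANCE_RD/INSTANCE_WR now take a gpuobj instead of a mem_block:
* (o)->im_pramin->start is the object's byte offset within the RAMIN
* aperture and i is a 32-bit word index into it.
*/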
#endif /* __NOUVEAU_DRV_H__ */

View File

@ -39,6 +39,8 @@ int nouveau_fifo_number(drm_device_t* dev)
case NV_04:
case NV_05:
return 16;
case NV_50:
return 128;
default:
return 32;
}
@ -51,7 +53,7 @@ int nouveau_fifo_ctx_size(drm_device_t* dev)
if (dev_priv->card_type >= NV_40)
return 128;
else if (dev_priv->card_type >= NV_10)
else if (dev_priv->card_type >= NV_17)
return 64;
else
return 32;
@ -83,6 +85,8 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev)
case NV_50:
case NV_40:
NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
if ((dev_priv->chipset == 0x49) || (dev_priv->chipset == 0x4b))
NV_WRITE(0x2230, 0x00000001);
break;
case NV_44:
NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
@ -90,10 +94,12 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev)
break;
case NV_30:
case NV_20:
case NV_10:
case NV_17:
NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
(1 << 16) /* 64 Bytes entry*/);
/* XXX nvidia blob sets bits 18, 21, 23 for nv20 & nv30 */
break;
case NV_10:
case NV_04:
case NV_03:
NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
@ -182,10 +188,12 @@ static int
nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
struct nouveau_config *config = &dev_priv->config;
struct mem_block *cb;
struct nouveau_object *cb_dma = NULL;
int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
nouveau_gpuobj_t *pushbuf = NULL;
int ret;
/* Defaults for unconfigured values */
if (!config->cmdbuf.location)
@ -202,260 +210,61 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
}
if (cb->flags & NOUVEAU_MEM_AGP) {
cb_dma = nouveau_object_dma_create(dev, channel,
DRM_DEBUG("Creating CB in AGP memory\n");
ret = nouveau_gpuobj_dma_new(dev, channel,
NV_CLASS_DMA_IN_MEMORY,
cb->start - dev_priv->agp_phys,
cb->size,
NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP);
} else if (dev_priv->card_type != NV_04) {
cb_dma = nouveau_object_dma_create(dev, channel,
NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, &pushbuf);
} else if (cb->flags & NOUVEAU_MEM_PCI) {
DRM_DEBUG("Creating CB in PCI memory starting at virt 0x%08llx size %d\n", cb->start, cb->size);
ret = nouveau_gpuobj_dma_new(dev, channel,
NV_CLASS_DMA_IN_MEMORY,
cb->start - drm_get_resource_start(dev, 1),
cb->start,
cb->size,
NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM);
NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI_NONLINEAR, &pushbuf);
} else if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new
(dev, channel, NV_CLASS_DMA_IN_MEMORY,
cb->start - drm_get_resource_start(dev, 1),
cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM,
&pushbuf);
} else {
/* NV04 cmdbuf hack, from the original ddx.. not sure of its
* exact reason for existing :) PCI access to cmdbuf in
* VRAM.
*/
cb_dma = nouveau_object_dma_create(dev, channel,
NV_CLASS_DMA_IN_MEMORY,
cb->start, cb->size,
NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI);
ret = nouveau_gpuobj_dma_new
(dev, channel, NV_CLASS_DMA_IN_MEMORY,
cb->start, cb->size, NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
}
if (!cb_dma) {
if (ret) {
nouveau_mem_free(dev, cb);
DRM_ERROR("Failed to alloc DMA object for command buffer\n");
return DRM_ERR(ENOMEM);
DRM_ERROR("Error creating push buffer ctxdma: %d\n", ret);
return ret;
}
dev_priv->fifos[channel].cmdbuf_mem = cb;
dev_priv->fifos[channel].cmdbuf_obj = cb_dma;
if ((ret = nouveau_gpuobj_ref_add(dev, channel, 0, pushbuf,
&chan->pushbuf))) {
DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
return ret;
}
dev_priv->fifos[channel]->pushbuf_base = 0;
dev_priv->fifos[channel]->pushbuf_mem = cb;
return 0;
}
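/* Note: the old cmdbuf_mem/cmdbuf_obj pair is replaced by pushbuf_mem
* plus a referenced pushbuf ctxdma; pushbuf_base records the offset
* within the buffer where the GPU starts fetching (and is what the
* fifo_alloc ioctl now reports back as put_base).
*/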
#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV04_RAMFC_##offset, (val))
static void nouveau_nv04_context_init(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_object *cb_obj;
uint32_t fifoctx, ctx_size = 32;
int i;
cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
fifoctx=NV_RAMIN+dev_priv->ramfc_offset+channel*ctx_size;
// clear the fifo context
for(i=0;i<ctx_size/4;i++)
NV_WRITE(fifoctx+4*i,0x0);
RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev, cb_obj->instance));
RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
}
#undef RAMFC_WR
#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val))
static void nouveau_nv10_context_init(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_object *cb_obj;
uint32_t fifoctx;
int i;
cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64;
for (i=0;i<64;i+=4)
NV_WRITE(fifoctx + i, 0);
/* Fill entries that are seen filled in dumps of nvidia driver just
* after a channel is put into DMA mode
*/
RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev,
cb_obj->instance));
RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
}
static void nouveau_nv30_context_init(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = &dev_priv->fifos[channel];
struct nouveau_object *cb_obj;
uint32_t fifoctx, grctx_inst, cb_inst, ctx_size = 64;
int i;
cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance);
grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel * ctx_size;
for (i = 0; i < ctx_size; i += 4)
NV_WRITE(fifoctx + i, 0);
RAMFC_WR(REF_CNT, NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
RAMFC_WR(DMA_INSTANCE, cb_inst);
RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1));
RAMFC_WR(ACQUIRE_VALUE, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP));
RAMFC_WR(ACQUIRE_TIMEOUT, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
RAMFC_WR(SEMAPHORE, NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
}
static void nouveau_nv10_context_save(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t fifoctx;
int channel;
channel = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1);
fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64;
RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE));
RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1));
RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP));
RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV10_PFIFO_CACHE1_DMA_SUBROUTINE));
}
#undef RAMFC_WR
#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val))
static void nouveau_nv40_context_init(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = &dev_priv->fifos[channel];
uint32_t fifoctx, cb_inst, grctx_inst;
int i;
cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance);
grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128;
for (i=0;i<128;i+=4)
NV_WRITE(fifoctx + i, 0);
/* Fill entries that are seen filled in dumps of nvidia driver just
* after a channel is put into DMA mode
*/
RAMFC_WR(DMA_INSTANCE , cb_inst);
RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
RAMFC_WR(GRCTX_INSTANCE, grctx_inst);
RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
}
static void nouveau_nv40_context_save(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t fifoctx;
int channel;
channel = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1);
fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128;
RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE));
RAMFC_WR(DMA_DCOUNT , NV_READ(NV10_PFIFO_CACHE1_DMA_DCOUNT));
RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1));
RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP));
RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE));
RAMFC_WR(DMA_TIMESLICE , NV_READ(NV04_PFIFO_DMA_TIMESLICE) & 0x1FFFF);
RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4));
}
#undef RAMFC_WR
/* This function should load values from RAMFC into PFIFO, but for now
* it just clobbers PFIFO with what nouveau_fifo_alloc used to set up
* unconditionally.
*/
static void
nouveau_fifo_context_restore(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = &dev_priv->fifos[channel];
uint32_t cb_inst;
cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance);
// FIXME check if we need to refill the time quota with something like NV_WRITE(0x204C, 0x0003FFFF);
if (dev_priv->card_type >= NV_40)
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00010000|channel);
else
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000100|channel);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0 /*RAMFC_DMA_PUT*/);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0 /*RAMFC_DMA_GET*/);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, cb_inst);
NV_WRITE(NV04_PFIFO_SIZE , 0x0000FFFF);
NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x00000000);
}
/* allocates and initializes a fifo for user space consumption */
static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp)
int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp,
uint32_t vram_handle, uint32_t tt_handle)
{
int ret;
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_object *cb_obj;
nouveau_engine_func_t *engine = &dev_priv->Engine;
struct nouveau_fifo *chan;
int channel;
/*
@ -466,21 +275,33 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp)
* (woo, full userspace command submission !)
* When there are no more contexts, you lost
*/
for(channel=0; channel<nouveau_fifo_number(dev); channel++)
if (dev_priv->fifos[channel].used==0)
for(channel=0; channel<nouveau_fifo_number(dev); channel++) {
if ((dev_priv->card_type == NV_50) && (channel == 0))
continue;
if (dev_priv->fifos[channel] == NULL)
break;
}
/* no more fifos. you lost. */
if (channel==nouveau_fifo_number(dev))
return DRM_ERR(EINVAL);
(*chan_ret) = channel;
dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_fifo),
DRM_MEM_DRIVER);
if (!dev_priv->fifos[channel])
return DRM_ERR(ENOMEM);
dev_priv->fifo_alloc_count++;
chan = dev_priv->fifos[channel];
chan->filp = filp;
DRM_INFO("Allocating FIFO number %d\n", channel);
/* that fifo is used */
dev_priv->fifos[channel].used = 1;
dev_priv->fifos[channel].filp = filp;
/* FIFO has no objects yet */
dev_priv->fifos[channel].objs = NULL;
/* Setup channel's default objects */
ret = nouveau_gpuobj_channel_init(dev, channel, vram_handle, tt_handle);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
/* allocate a command buffer, and create a dma object for the gpu */
ret = nouveau_fifo_cmdbuf_alloc(dev, channel);
@ -488,7 +309,13 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp)
nouveau_fifo_free(dev, channel);
return ret;
}
cb_obj = dev_priv->fifos[channel].cmdbuf_obj;
/* Allocate space for per-channel fixed notifier memory */
ret = nouveau_notifier_init_channel(dev, channel, filp);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
nouveau_wait_for_idle(dev);
@ -498,84 +325,67 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp)
NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
/* Construct initial RAMFC for new channel */
switch(dev_priv->card_type)
{
case NV_04:
case NV_05:
nv04_graph_context_create(dev, channel);
nouveau_nv04_context_init(dev, channel);
break;
case NV_10:
nv10_graph_context_create(dev, channel);
nouveau_nv10_context_init(dev, channel);
break;
case NV_20:
ret = nv20_graph_context_create(dev, channel);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
nouveau_nv10_context_init(dev, channel);
break;
case NV_30:
ret = nv30_graph_context_create(dev, channel);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
nouveau_nv30_context_init(dev, channel);
break;
case NV_40:
case NV_44:
case NV_50:
ret = nv40_graph_context_create(dev, channel);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
nouveau_nv40_context_init(dev, channel);
break;
/* Create a graphics context for new channel */
ret = engine->graph.create_context(dev, channel);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
/* enable the fifo dma operation */
NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel));
/* Construct initial RAMFC for new channel */
ret = engine->fifo.create_context(dev, channel);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
/* setup channel's default get/put values */
NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), 0);
NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), 0);
if (dev_priv->card_type < NV_50) {
NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
} else {
NV_WRITE(NV50_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
NV_WRITE(NV50_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
}
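/* NV50 moved the user-visible FIFO control registers; see the
* NV50_FIFO_REGS() definitions in nouveau_reg.h (0xc00000 with a
* 0x2000 stride per channel, versus 0x800000 with a 0x10000 stride
* on earlier cards).
*/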
/* If this is the first channel, setup PFIFO ourselves. For any
* other case, the GPU will handle this when it switches contexts.
*/
if (dev_priv->fifo_alloc_count == 0) {
nouveau_fifo_context_restore(dev, channel);
if (dev_priv->card_type >= NV_30) {
struct nouveau_fifo *chan;
uint32_t inst;
if (dev_priv->fifo_alloc_count == 1) {
ret = engine->fifo.load_context(dev, channel);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
chan = &dev_priv->fifos[channel];
inst = nouveau_chip_instance_get(dev,
chan->ramin_grctx);
ret = engine->graph.load_context(dev, channel);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
/* see comments in nv40_graph_context_restore() */
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, inst);
if (dev_priv->card_type >= NV_40) {
NV_WRITE(0x40032C, inst | 0x01000000);
NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst);
}
/* Temporary hack, to avoid breaking Xv on cards where the
* initial context value for 0x400710 doesn't have these bits
* set. Proper fix would be to find which object+method is
* responsible for modifying this state.
*/
if (dev_priv->chipset >= 0x10 && dev_priv->chipset < 0x50) {
uint32_t tmp;
tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
}
}
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
/* reenable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
dev_priv->fifo_alloc_count++;
NV_WRITE(NV03_PFIFO_CACHES, 1);
DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
return 0;
@ -585,50 +395,44 @@ static int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp)
void nouveau_fifo_free(drm_device_t* dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = &dev_priv->fifos[channel];
int i;
int ctx_size = nouveau_fifo_ctx_size(dev);
nouveau_engine_func_t *engine = &dev_priv->Engine;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (!chan) {
DRM_ERROR("Freeing non-existant channel %d\n", channel);
return;
}
chan->used = 0;
DRM_INFO("%s: freeing fifo %d\n", __func__, channel);
/* disable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel));
// FIXME XXX needs more code
/* Clean RAMFC */
for (i=0;i<ctx_size;i+=4) {
DRM_DEBUG("RAMFC +%02x: 0x%08x\n", i, NV_READ(NV_RAMIN +
dev_priv->ramfc_offset +
channel*ctx_size + i));
NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset +
channel*ctx_size + i, 0);
}
engine->fifo.destroy_context(dev, channel);
/* Cleanup PGRAPH state */
if (dev_priv->card_type >= NV_40)
nouveau_instmem_free(dev, chan->ramin_grctx);
else if (dev_priv->card_type >= NV_30) {
}
else if (dev_priv->card_type >= NV_20) {
/* clear ctx table */
INSTANCE_WR(dev_priv->ctx_table, channel, 0);
nouveau_instmem_free(dev, chan->ramin_grctx);
}
engine->graph.destroy_context(dev, channel);
/* reenable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
/* Deallocate command buffer */
if (chan->cmdbuf_mem)
nouveau_mem_free(dev, chan->cmdbuf_mem);
/* Deallocate push buffer */
nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
if (chan->pushbuf_mem) {
nouveau_mem_free(dev, chan->pushbuf_mem);
chan->pushbuf_mem = NULL;
}
nouveau_notifier_takedown_channel(dev, channel);
/* Destroy objects belonging to the channel */
nouveau_object_cleanup(dev, channel);
nouveau_gpuobj_channel_takedown(dev, channel);
dev_priv->fifos[channel] = NULL;
dev_priv->fifo_alloc_count--;
drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
}
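/* Teardown mirrors allocation: RAMFC and the PGRAPH context go away
* through the engine hooks, then the push buffer, notifier block and
* the channel's gpuobjs, before the fifos[] slot itself is freed.
*/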
/* cleans up all the fifos from filp */
@ -639,7 +443,7 @@ void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp)
DRM_DEBUG("clearing FIFO enables from filp\n");
for(i=0;i<nouveau_fifo_number(dev);i++)
if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp==filp)
if (dev_priv->fifos[i] && dev_priv->fifos[i]->filp==filp)
nouveau_fifo_free(dev,i);
}
@ -650,9 +454,9 @@ nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel)
if (channel >= nouveau_fifo_number(dev))
return 0;
if (dev_priv->fifos[channel].used == 0)
if (dev_priv->fifos[channel] == NULL)
return 0;
return (dev_priv->fifos[channel].filp == filp);
return (dev_priv->fifos[channel]->filp == filp);
}
/***********************************
@ -663,31 +467,44 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan;
drm_nouveau_fifo_alloc_t init;
int res;
DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data,
sizeof(init));
res = nouveau_fifo_alloc(dev, &init.channel, filp);
res = nouveau_fifo_alloc(dev, &init.channel, filp,
init.fb_ctxdma_handle,
init.tt_ctxdma_handle);
if (res)
return res;
chan = dev_priv->fifos[init.channel];
/* this should probably disappear in the next abi break? */
init.put_base = 0;
init.put_base = chan->pushbuf_base;
/* make the fifo available to user space */
/* first, the fifo control regs */
init.ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init.channel);
init.ctrl_size = NV03_FIFO_REGS_SIZE;
init.ctrl = dev_priv->mmio->offset;
if (dev_priv->card_type < NV_50) {
init.ctrl += NV03_FIFO_REGS(init.channel);
init.ctrl_size = NV03_FIFO_REGS_SIZE;
} else {
init.ctrl += NV50_FIFO_REGS(init.channel);
init.ctrl_size = NV50_FIFO_REGS_SIZE;
}
res = drm_addmap(dev, init.ctrl, init.ctrl_size, _DRM_REGISTERS,
0, &dev_priv->fifos[init.channel].regs);
0, &chan->regs);
if (res != 0)
return res;
/* pass back FIFO map info to the caller */
init.cmdbuf = dev_priv->fifos[init.channel].cmdbuf_mem->start;
init.cmdbuf_size = dev_priv->fifos[init.channel].cmdbuf_mem->size;
init.cmdbuf = chan->pushbuf_mem->start;
init.cmdbuf_size = chan->pushbuf_mem->size;
/* and the notifier block */
init.notifier = chan->notifier_block->start;
init.notifier_size = chan->notifier_block->size;
DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data,
init, sizeof(init));
@ -700,8 +517,8 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS)
drm_ioctl_desc_t nouveau_ioctls[] = {
[DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH},
[DRM_IOCTL_NR(DRM_NOUVEAU_OBJECT_INIT)] = {nouveau_ioctl_object_init, DRM_AUTH},
[DRM_IOCTL_NR(DRM_NOUVEAU_DMA_OBJECT_INIT)] = {nouveau_ioctl_dma_object_init, DRM_AUTH},
[DRM_IOCTL_NR(DRM_NOUVEAU_GROBJ_ALLOC)] = {nouveau_ioctl_grobj_alloc, DRM_AUTH},
[DRM_IOCTL_NR(DRM_NOUVEAU_NOTIFIER_ALLOC)] = {nouveau_ioctl_notifier_alloc, DRM_AUTH},
[DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH},
[DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH},
[DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH},

View File

@ -87,34 +87,14 @@ void nouveau_irq_postinstall(drm_device_t *dev)
DRM_DEBUG("IRQ: postinst\n");
/* Enable PFIFO error reporting */
NV_WRITE(NV03_PFIFO_INTR_EN_0 ,
NV_PFIFO_INTR_CACHE_ERROR |
NV_PFIFO_INTR_RUNOUT |
NV_PFIFO_INTR_RUNOUT_OVERFLOW |
NV_PFIFO_INTR_DMA_PUSHER |
NV_PFIFO_INTR_DMA_PT |
NV_PFIFO_INTR_SEMAPHORE |
NV_PFIFO_INTR_ACQUIRE_TIMEOUT
);
NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
/* Enable PGRAPH interrupts */
if (dev_priv->card_type<NV_40)
NV_WRITE(NV03_PGRAPH_INTR_EN,
NV_PGRAPH_INTR_NOTIFY |
NV_PGRAPH_INTR_MISSING_HW |
NV_PGRAPH_INTR_CONTEXT_SWITCH |
NV_PGRAPH_INTR_BUFFER_NOTIFY |
NV_PGRAPH_INTR_ERROR
);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
else
NV_WRITE(NV40_PGRAPH_INTR_EN,
NV_PGRAPH_INTR_NOTIFY |
NV_PGRAPH_INTR_MISSING_HW |
NV_PGRAPH_INTR_CONTEXT_SWITCH |
NV_PGRAPH_INTR_BUFFER_NOTIFY |
NV_PGRAPH_INTR_ERROR
);
NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
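/* The selective per-source masks are gone: every interrupt source is
* enabled, then 1s are written to the status registers to ack anything
* already pending (these registers are write-1-to-clear).
*/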
#if 0
@ -271,22 +251,25 @@ nouveau_graph_dump_trap_info(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t address;
uint32_t channel;
uint32_t channel, class;
uint32_t method, subc, data;
address = NV_READ(0x400704);
data = NV_READ(0x400708);
channel = (address >> 20) & 0x1F;
subc = (address >> 16) & 0x7;
method = address & 0x1FFC;
data = NV_READ(0x400708);
if (dev_priv->card_type < NV_50) {
class = NV_READ(0x400160 + subc*4) & 0xFFFF;
} else {
class = NV_READ(0x400814);
}
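/* Pre-NV50, the class bound to each subchannel can be read back at
* 0x400160 + subc*4; NV50 apparently only exposes the active class,
* at 0x400814.
*/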
DRM_ERROR("NV: nSource: 0x%08x, nStatus: 0x%08x\n",
NV_READ(0x400108), NV_READ(0x400104));
DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -"
"Method 0x%04x, Data 0x%08x\n",
channel, subc,
NV_READ(0x400160+subc*4) & 0xFFFF,
method, data
channel, subc, class, method, data
);
}
@ -314,7 +297,7 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev)
instance = NV_READ(0x00400158);
notify = NV_READ(0x00400150) >> 16;
DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n",
nsource, nstatus);
instance, notify);
}
status &= ~NV_PGRAPH_INTR_NOTIFY;
@ -372,6 +355,7 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev)
nouveau_nv04_context_switch(dev);
break;
case NV_10:
case NV_17:
nouveau_nv10_context_switch(dev);
break;
case NV_20:

View File

@ -77,8 +77,8 @@ out:
return p;
}
static struct mem_block *alloc_block(struct mem_block *heap, uint64_t size,
int align2, DRMFILE filp)
struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
int align2, DRMFILE filp)
{
struct mem_block *p;
uint64_t mask = (1 << align2) - 1;
@ -106,7 +106,7 @@ static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
return NULL;
}
static void free_block(struct mem_block *p)
void nouveau_mem_free_block(struct mem_block *p)
{
p->filp = NULL;
@ -132,7 +132,8 @@ static void free_block(struct mem_block *p)
/* Initialize. How to check for an uninitialized heap?
*/
static int init_heap(struct mem_block **heap, uint64_t start, uint64_t size)
int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
uint64_t size)
{
struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
@ -188,7 +189,7 @@ void nouveau_mem_release(DRMFILE filp, struct mem_block *heap)
/*
* Cleanup everything
*/
static void nouveau_mem_takedown(struct mem_block **heap)
void nouveau_mem_takedown(struct mem_block **heap)
{
struct mem_block *p;
@ -210,6 +211,10 @@ void nouveau_mem_close(struct drm_device *dev)
drm_nouveau_private_t *dev_priv = dev->dev_private;
nouveau_mem_takedown(&dev_priv->agp_heap);
nouveau_mem_takedown(&dev_priv->fb_heap);
if (dev_priv->pci_heap)
nouveau_mem_takedown(&dev_priv->pci_heap);
}
/* returns the amount of FB ram in bytes */
@ -248,6 +253,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
}
break;
case NV_10:
case NV_17:
case NV_20:
case NV_30:
case NV_40:
@ -281,8 +287,10 @@ int nouveau_mem_init(struct drm_device *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t fb_size;
drm_scatter_gather_t sgreq;
dev_priv->agp_phys=0;
dev_priv->fb_phys=0;
sgreq.size = 4 << 20; /* 4MB of PCI scatter-gather zone */
/* init AGP */
dev_priv->agp_heap=NULL;
@ -330,14 +338,34 @@ int nouveau_mem_init(struct drm_device *dev)
goto no_agp;
}
if (init_heap(&dev_priv->agp_heap, info.aperture_base, info.aperture_size))
if (nouveau_mem_init_heap(&dev_priv->agp_heap,
info.aperture_base,
info.aperture_size))
goto no_agp;
dev_priv->agp_phys = info.aperture_base;
dev_priv->agp_available_size = info.aperture_size;
}
no_agp:
goto have_agp;
no_agp:
dev_priv->pci_heap = NULL;
DRM_DEBUG("Allocating sg memory for PCI DMA\n");
if (drm_sg_alloc(dev, &sgreq)) {
DRM_ERROR("Unable to allocate 4MB of scatter-gather pages for PCI DMA!\n");
goto no_pci;
}
DRM_DEBUG("Got %d KiB\n", (dev->sg->pages * PAGE_SIZE) >> 10);
if (nouveau_mem_init_heap(&dev_priv->pci_heap, dev->sg->virtual,
dev->sg->pages * PAGE_SIZE)) {
DRM_ERROR("Unable to initialize pci_heap!\n");
goto no_pci;
}
no_pci:
have_agp:
/* setup a mtrr over the FB */
dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
nouveau_mem_fb_amount(dev),
@ -349,19 +377,26 @@ no_agp:
/* On at least NV40, RAMIN is actually at the end of vram.
* We don't want to allocate this... */
if (dev_priv->card_type >= NV_40)
fb_size -= dev_priv->ramin_size;
fb_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_available_size = fb_size;
DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);
if (fb_size>256*1024*1024) {
/* On cards with > 256Mb, you can't map everything.
* So we create a second FB heap for that type of memory */
if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), 256*1024*1024))
if (nouveau_mem_init_heap(&dev_priv->fb_heap,
drm_get_resource_start(dev,1),
256*1024*1024))
return DRM_ERR(ENOMEM);
if (init_heap(&dev_priv->fb_nomap_heap, drm_get_resource_start(dev,1)+256*1024*1024, fb_size-256*1024*1024))
if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
drm_get_resource_start(dev,1) +
256*1024*1024,
fb_size-256*1024*1024))
return DRM_ERR(ENOMEM);
} else {
if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), fb_size))
if (nouveau_mem_init_heap(&dev_priv->fb_heap,
drm_get_resource_start(dev,1),
fb_size))
return DRM_ERR(ENOMEM);
dev_priv->fb_nomap_heap=NULL;
}
@ -394,25 +429,40 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6
if (size & (~PAGE_MASK))
size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
if (flags&NOUVEAU_MEM_AGP) {
type=NOUVEAU_MEM_AGP;
block = alloc_block(dev_priv->agp_heap, size, alignment, filp);
if (block) goto alloc_ok;
}
if (flags&(NOUVEAU_MEM_FB|NOUVEAU_MEM_FB_ACCEPTABLE)) {
type=NOUVEAU_MEM_FB;
if (!(flags&NOUVEAU_MEM_MAPPED)) {
block = alloc_block(dev_priv->fb_nomap_heap, size, alignment, filp);
if (block) goto alloc_ok;
}
block = alloc_block(dev_priv->fb_heap, size, alignment, filp);
if (block) goto alloc_ok;
}
if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) {
type=NOUVEAU_MEM_AGP;
block = alloc_block(dev_priv->agp_heap, size, alignment, filp);
if (block) goto alloc_ok;
}
#define NOUVEAU_MEM_ALLOC_AGP {\
type=NOUVEAU_MEM_AGP;\
block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
alignment, filp);\
if (block) goto alloc_ok;\
}
#define NOUVEAU_MEM_ALLOC_PCI {\
type = NOUVEAU_MEM_PCI;\
block = nouveau_mem_alloc_block(dev_priv->pci_heap, size,\
alignment, filp);\
if (block) goto alloc_ok;\
}
#define NOUVEAU_MEM_ALLOC_FB {\
type=NOUVEAU_MEM_FB;\
if (!(flags&NOUVEAU_MEM_MAPPED)) {\
block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
size, alignment, filp); \
if (block) goto alloc_ok;\
}\
block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
alignment, filp);\
if (block) goto alloc_ok;\
}
if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI
return NULL;
@ -421,17 +471,21 @@ alloc_ok:
if (flags&NOUVEAU_MEM_MAPPED)
{
int ret;
int ret = 0;
block->flags|=NOUVEAU_MEM_MAPPED;
if (type == NOUVEAU_MEM_AGP)
ret = drm_addmap(dev, block->start - dev->agp->base, block->size,
_DRM_AGP, 0, &block->map);
else
else if (type == NOUVEAU_MEM_FB)
ret = drm_addmap(dev, block->start, block->size,
_DRM_FRAME_BUFFER, 0, &block->map);
else if (type == NOUVEAU_MEM_PCI)
ret = drm_addmap(dev,
block->start - (unsigned long)dev->sg->virtual,
block->size, _DRM_SCATTER_GATHER, 0, &block->map);
if (ret) {
free_block(block);
nouveau_mem_free_block(block);
return NULL;
}
}
@ -445,176 +499,7 @@ void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
DRM_INFO("freeing 0x%llx\n", block->start);
if (block->flags&NOUVEAU_MEM_MAPPED)
drm_rmmap(dev, block->map);
free_block(block);
}
static void
nouveau_instmem_determine_amount(struct drm_device *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
/* Figure out how much instance memory we need */
switch (dev_priv->card_type) {
case NV_40:
/* We'll want more instance memory than this on some NV4x cards.
* There's a 16MB aperture to play with that maps onto the end
* of vram. For now, only reserve a small piece until we know
* more about what each chipset requires.
*/
dev_priv->ramin_size = (1*1024* 1024);
break;
default:
/*XXX: what *are* the limits on <NV40 cards?, and does RAMIN
* exist in vram on those cards as well?
*/
dev_priv->ramin_size = (512*1024);
break;
}
DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_size>>10);
/* Clear all of it, except the BIOS image that's in the first 64KiB */
if (dev_priv->ramin) {
for (i=(64*1024); i<dev_priv->ramin_size; i+=4)
DRM_WRITE32(dev_priv->ramin, i, 0x00000000);
} else {
for (i=(64*1024); i<dev_priv->ramin_size; i+=4)
DRM_WRITE32(dev_priv->mmio, NV_RAMIN + i, 0x00000000);
}
}
static void
nouveau_instmem_configure_fixed_tables(struct drm_device *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
/* FIFO hash table (RAMHT)
* use 4k hash table at RAMIN+0x10000
* TODO: extend the hash table
*/
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
dev_priv->ramht_size);
/* FIFO runout table (RAMRO) - 512k at 0x11200 */
dev_priv->ramro_offset = 0x11200;
dev_priv->ramro_size = 512;
DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
dev_priv->ramro_size);
/* FIFO context table (RAMFC)
* NV40 : Not sure exactly how to position RAMFC on some cards,
* 0x30002 seems to position it at RAMIN+0x20000 on these
* cards. RAMFC is 4kb (32 fifos, 128byte entries).
* Others: Position RAMFC at RAMIN+0x11400
*/
switch(dev_priv->card_type)
{
case NV_50:
case NV_40:
case NV_44:
dev_priv->ramfc_offset = 0x20000;
dev_priv->ramfc_size = nouveau_fifo_number(dev) *
nouveau_fifo_ctx_size(dev);
break;
case NV_30:
case NV_20:
case NV_10:
case NV_04:
case NV_03:
default:
dev_priv->ramfc_offset = 0x11400;
dev_priv->ramfc_size = nouveau_fifo_number(dev) *
nouveau_fifo_ctx_size(dev);
break;
}
DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
dev_priv->ramfc_size);
}
int nouveau_instmem_init(struct drm_device *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t offset;
int ret = 0;
nouveau_instmem_determine_amount(dev);
nouveau_instmem_configure_fixed_tables(dev);
/* Create a heap to manage RAMIN allocations, we don't allocate
* the space that was reserved for RAMHT/FC/RO.
*/
offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
ret = init_heap(&dev_priv->ramin_heap,
offset, dev_priv->ramin_size - offset);
if (ret) {
dev_priv->ramin_heap = NULL;
DRM_ERROR("Failed to init RAMIN heap\n");
}
return ret;
}
struct mem_block *nouveau_instmem_alloc(struct drm_device *dev,
uint32_t size, uint32_t align)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct mem_block *block;
if (!dev_priv->ramin_heap) {
DRM_ERROR("instmem alloc called without init\n");
return NULL;
}
block = alloc_block(dev_priv->ramin_heap, size, align, (DRMFILE)-2);
if (block) {
block->flags = NOUVEAU_MEM_INSTANCE;
DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n",
size, (1<<align), (uint32_t)block->start);
}
return block;
}
void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block)
{
if (dev && block) {
free_block(block);
}
}
uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv,
struct mem_block *mem, int index)
{
uint32_t ofs = (uint32_t)mem->start + (index<<2);
if (dev_priv->ramin) {
#if defined(__powerpc__)
return in_be32((void __iomem *)(dev_priv->ramin)->handle + ofs);
#else
return DRM_READ32(dev_priv->ramin, ofs);
#endif
} else {
return NV_READ(NV_RAMIN+ofs);
}
}
void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv,
struct mem_block *mem, int index, uint32_t val)
{
uint32_t ofs = (uint32_t)mem->start + (index<<2);
if (dev_priv->ramin) {
#if defined(__powerpc__)
out_be32((void __iomem *)(dev_priv->ramin)->handle + ofs, val);
#else
DRM_WRITE32(dev_priv->ramin, ofs, val);
#endif
} else {
NV_WRITE(NV_RAMIN+ofs, val);
}
nouveau_mem_free_block(block);
}
/*

View File

@ -0,0 +1,153 @@
/*
* Copyright (C) 2007 Ben Skeggs.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
int
nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int flags, ret;
/* TODO: PCI notifier blocks */
if (dev_priv->agp_heap)
flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE;
else
flags = NOUVEAU_MEM_FB;
flags |= NOUVEAU_MEM_MAPPED;
chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, filp);
if (!chan->notifier_block)
return DRM_ERR(ENOMEM);
ret = nouveau_mem_init_heap(&chan->notifier_heap,
0, chan->notifier_block->size);
if (ret)
return ret;
return 0;
}
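/* The notifier block is a single page; notifier_heap then
* sub-allocates 32-byte notifier objects out of it (see
* nouveau_notifier_alloc below).
*/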
void
nouveau_notifier_takedown_channel(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (chan->notifier_block) {
nouveau_mem_free(dev, chan->notifier_block);
chan->notifier_block = NULL;
}
/*XXX: heap destroy */
}
int
nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle,
int count, uint32_t *b_offset)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
nouveau_gpuobj_t *nobj = NULL;
struct mem_block *mem;
uint32_t offset;
int target, ret;
if (!chan->notifier_heap) {
DRM_ERROR("Channel %d doesn't have a notifier heap!\n",
channel);
return DRM_ERR(EINVAL);
}
mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, chan->filp);
if (!mem) {
DRM_ERROR("Channel %d notifier block full\n", channel);
return DRM_ERR(ENOMEM);
}
mem->flags = NOUVEAU_MEM_NOTIFIER;
offset = chan->notifier_block->start + mem->start;
if (chan->notifier_block->flags & NOUVEAU_MEM_FB) {
offset -= drm_get_resource_start(dev, 1);
target = NV_DMA_TARGET_VIDMEM;
} else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
offset -= dev_priv->agp_phys;
target = NV_DMA_TARGET_AGP;
} else {
DRM_ERROR("Bad DMA target, flags 0x%08x!\n",
chan->notifier_block->flags);
return DRM_ERR(EINVAL);
}
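/* ctxdma offsets are relative to the target aperture, so the global
* address is rebased against the start of VRAM or the AGP aperture
* before the DMA object is created.
*/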
if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
offset, mem->size,
NV_DMA_ACCESS_RW, target, &nobj))) {
nouveau_mem_free_block(mem);
DRM_ERROR("Error creating notifier ctxdma: %d\n", ret);
return ret;
}
if ((ret = nouveau_gpuobj_ref_add(dev, channel, handle, nobj, NULL))) {
nouveau_gpuobj_del(dev, &nobj);
nouveau_mem_free_block(mem);
DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret);
return ret;
}
*b_offset = mem->start;
return 0;
}
int
nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_nouveau_notifier_alloc_t na;
int ret;
DRM_COPY_FROM_USER_IOCTL(na, (drm_nouveau_notifier_alloc_t __user*)data,
sizeof(na));
if (!nouveau_fifo_owner(dev, filp, na.channel)) {
DRM_ERROR("pid %d doesn't own channel %d\n",
DRM_CURRENTPID, na.channel);
return DRM_ERR(EPERM);
}
ret = nouveau_notifier_alloc(dev, na.channel, na.handle,
na.count, &na.offset);
if (ret)
return ret;
DRM_COPY_TO_USER_IOCTL((drm_nouveau_notifier_alloc_t __user*)data,
na, sizeof(na));
return 0;
}

File diff suppressed because it is too large

View File

@ -39,6 +39,8 @@
#define NV_DMA_TARGET_VIDMEM 0
#define NV_DMA_TARGET_PCI 2
#define NV_DMA_TARGET_AGP 3
/* The following is not a real value used by nvidia cards; it's translated to a real target by the DMA object creation code */
#define NV_DMA_TARGET_PCI_NONLINEAR 8
/* Some object classes we care about in the drm */
#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
@ -47,11 +49,15 @@
#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
#define NV03_FIFO_SIZE 0x8000UL
#define NV_MAX_FIFO_NUMBER 32
#define NV_MAX_FIFO_NUMBER 128
#define NV03_FIFO_REGS_SIZE 0x10000
#define NV03_FIFO_REGS(i) (0x00800000+i*NV03_FIFO_REGS_SIZE)
# define NV03_FIFO_REGS_DMAPUT(i) (NV03_FIFO_REGS(i)+0x40)
# define NV03_FIFO_REGS_DMAGET(i) (NV03_FIFO_REGS(i)+0x44)
#define NV50_FIFO_REGS_SIZE 0x2000
#define NV50_FIFO_REGS(i) (0x00c00000+i*NV50_FIFO_REGS_SIZE)
# define NV50_FIFO_REGS_DMAPUT(i) (NV50_FIFO_REGS(i)+0x40)
# define NV50_FIFO_REGS_DMAGET(i) (NV50_FIFO_REGS(i)+0x44)
#define NV03_PMC_BOOT_0 0x00000000
#define NV03_PMC_INTR_0 0x00000100
@ -135,6 +141,17 @@
#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
#define NV40_PGRAPH_CTXCTL_0304 0x00400304
#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
#define NV40_PGRAPH_CTXCTL_0310 0x00400310
#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK 0x000FFFFF
#define NV03_PGRAPH_ABS_X_RAM 0x00400400
#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
#define NV03_PGRAPH_X_MISC 0x00400500
@ -230,7 +247,11 @@
#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
#define NV10_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
#define NV10_PGRAPH_CHANNEL_CTX_SIZE 0x00400784
#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
#define NV10_PGRAPH_CHANNEL_CTX_POINTER 0x00400788
#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
#define NV04_PGRAPH_PATT_COLOR0 0x00400800
#define NV04_PGRAPH_PATT_COLOR1 0x00400804
#define NV04_PGRAPH_PATTERN 0x00400808
@ -317,6 +338,12 @@
#define NV04_PFIFO_MODE 0x00002504
#define NV04_PFIFO_DMA 0x00002508
#define NV04_PFIFO_SIZE 0x0000250c
#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
#define NV50_PFIFO_CTX_TABLE__SIZE 128
#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
#define NV03_PFIFO_CACHE0_PULL0 0x00003040
#define NV04_PFIFO_CACHE0_PULL0 0x00003050
@ -404,7 +431,7 @@
#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
#define NV03_PFIFO_CACHE1_GET 0x00003270
#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
#define NV10_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
#define NV40_PFIFO_UNK32E4 0x000032E4
#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
@ -427,7 +454,10 @@
#define NV04_RAMFC_DMA_PUT 0x00
#define NV04_RAMFC_DMA_GET 0x04
#define NV04_RAMFC_DMA_INSTANCE 0x08
#define NV04_RAMFC_DMA_STATE 0x0C
#define NV04_RAMFC_DMA_FETCH 0x10
#define NV04_RAMFC_ENGINE 0x14
#define NV04_RAMFC_PULL1_ENGINE 0x18
#define NV10_RAMFC_DMA_PUT 0x00
#define NV10_RAMFC_DMA_GET 0x04
@ -462,6 +492,6 @@
#define NV40_RAMFC_UNK_40 0x40
#define NV40_RAMFC_UNK_44 0x44
#define NV40_RAMFC_UNK_48 0x48
#define NV40_RAMFC_2088 0x4C
#define NV40_RAMFC_3300 0x50
#define NV40_RAMFC_UNK_4C 0x4C
#define NV40_RAMFC_UNK_50 0x50

View File

@ -51,6 +51,7 @@ static int nouveau_init_card_mappings(drm_device_t *dev)
DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset);
/* map larger RAMIN aperture on NV40 cards */
dev_priv->ramin = NULL;
if (dev_priv->card_type >= NV_40) {
int ramin_resource = 2;
if (drm_get_resource_len(dev, ramin_resource) == 0)
@ -66,12 +67,26 @@ static int nouveau_init_card_mappings(drm_device_t *dev)
"limited instance memory available\n");
dev_priv->ramin = NULL;
}
} else
dev_priv->ramin = NULL;
}
/* On older cards (or if the above failed), create a map covering
* the BAR0 PRAMIN aperture */
if (!dev_priv->ramin) {
ret = drm_addmap(dev,
drm_get_resource_start(dev, 0) + NV_RAMIN,
(1*1024*1024),
_DRM_REGISTERS, _DRM_READ_ONLY,
&dev_priv->ramin);
if (ret) {
DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret);
return ret;
}
}
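/* Fallback: go through the 1MiB PRAMIN window in BAR0 (at NV_RAMIN)
* when no dedicated RAMIN BAR exists or mapping it failed; NV40+
* prefer the larger dedicated aperture mapped above.
*/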
return 0;
}
static int nouveau_stub_init(drm_device_t *dev) { return 0; }
static void nouveau_stub_takedown(drm_device_t *dev) {}
static int nouveau_init_engine_ptrs(drm_device_t *dev)
{
@ -80,66 +95,162 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev)
switch (dev_priv->chipset & 0xf0) {
case 0x00:
engine->Mc.Init = nv04_mc_init;
engine->Mc.Takedown = nv04_mc_takedown;
engine->Timer.Init = nv04_timer_init;
engine->Timer.Takedown = nv04_timer_takedown;
engine->Fb.Init = nv04_fb_init;
engine->Fb.Takedown = nv04_fb_takedown;
engine->Graph.Init = nv04_graph_init;
engine->Graph.Takedown = nv04_graph_takedown;
engine->Fifo.Init = nouveau_fifo_init;
engine->Fifo.Takedown = nouveau_stub_takedown;
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown= nv04_instmem_takedown;
engine->instmem.populate = nv04_instmem_populate;
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv04_fb_init;
engine->fb.takedown = nv04_fb_takedown;
engine->graph.init = nv04_graph_init;
engine->graph.takedown = nv04_graph_takedown;
engine->graph.create_context = nv04_graph_create_context;
engine->graph.destroy_context = nv04_graph_destroy_context;
engine->graph.load_context = nv04_graph_load_context;
engine->graph.save_context = nv04_graph_save_context;
engine->fifo.init = nouveau_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.create_context = nv04_fifo_create_context;
engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv04_fifo_load_context;
engine->fifo.save_context = nv04_fifo_save_context;
break;
case 0x10:
engine->Mc.Init = nv04_mc_init;
engine->Mc.Takedown = nv04_mc_takedown;
engine->Timer.Init = nv04_timer_init;
engine->Timer.Takedown = nv04_timer_takedown;
engine->Fb.Init = nv10_fb_init;
engine->Fb.Takedown = nv10_fb_takedown;
engine->Graph.Init = nv10_graph_init;
engine->Graph.Takedown = nv10_graph_takedown;
engine->Fifo.Init = nouveau_fifo_init;
engine->Fifo.Takedown = nouveau_stub_takedown;
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown= nv04_instmem_takedown;
engine->instmem.populate = nv04_instmem_populate;
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
engine->graph.init = nv10_graph_init;
engine->graph.takedown = nv10_graph_takedown;
engine->graph.create_context = nv10_graph_create_context;
engine->graph.destroy_context = nv10_graph_destroy_context;
engine->graph.load_context = nv10_graph_load_context;
engine->graph.save_context = nv10_graph_save_context;
engine->fifo.init = nouveau_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.save_context = nv10_fifo_save_context;
break;
case 0x20:
engine->Mc.Init = nv04_mc_init;
engine->Mc.Takedown = nv04_mc_takedown;
engine->Timer.Init = nv04_timer_init;
engine->Timer.Takedown = nv04_timer_takedown;
engine->Fb.Init = nv10_fb_init;
engine->Fb.Takedown = nv10_fb_takedown;
engine->Graph.Init = nv20_graph_init;
engine->Graph.Takedown = nv20_graph_takedown;
engine->Fifo.Init = nouveau_fifo_init;
engine->Fifo.Takedown = nouveau_stub_takedown;
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown= nv04_instmem_takedown;
engine->instmem.populate = nv04_instmem_populate;
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
engine->graph.init = nv20_graph_init;
engine->graph.takedown = nv20_graph_takedown;
engine->graph.create_context = nv20_graph_create_context;
engine->graph.destroy_context = nv20_graph_destroy_context;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.save_context = nv20_graph_save_context;
engine->fifo.init = nouveau_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.save_context = nv10_fifo_save_context;
break;
case 0x30:
engine->Mc.Init = nv04_mc_init;
engine->Mc.Takedown = nv04_mc_takedown;
engine->Timer.Init = nv04_timer_init;
engine->Timer.Takedown = nv04_timer_takedown;
engine->Fb.Init = nv10_fb_init;
engine->Fb.Takedown = nv10_fb_takedown;
engine->Graph.Init = nv30_graph_init;
engine->Graph.Takedown = nv30_graph_takedown;
engine->Fifo.Init = nouveau_fifo_init;
engine->Fifo.Takedown = nouveau_stub_takedown;
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown= nv04_instmem_takedown;
engine->instmem.populate = nv04_instmem_populate;
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
engine->graph.init = nv30_graph_init;
engine->graph.takedown = nv30_graph_takedown;
engine->graph.create_context = nv30_graph_create_context;
engine->graph.destroy_context = nv30_graph_destroy_context;
engine->graph.load_context = nv30_graph_load_context;
engine->graph.save_context = nv30_graph_save_context;
engine->fifo.init = nouveau_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.save_context = nv10_fifo_save_context;
break;
case 0x40:
engine->Mc.Init = nv40_mc_init;
engine->Mc.Takedown = nv40_mc_takedown;
engine->Timer.Init = nv04_timer_init;
engine->Timer.Takedown = nv04_timer_takedown;
engine->Fb.Init = nv40_fb_init;
engine->Fb.Takedown = nv40_fb_takedown;
engine->Graph.Init = nv40_graph_init;
engine->Graph.Takedown = nv40_graph_takedown;
engine->Fifo.Init = nouveau_fifo_init;
engine->Fifo.Takedown = nouveau_stub_takedown;
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown= nv04_instmem_takedown;
engine->instmem.populate = nv04_instmem_populate;
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv40_mc_init;
engine->mc.takedown = nv40_mc_takedown;
engine->timer.init = nv04_timer_init;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv40_fb_init;
engine->fb.takedown = nv40_fb_takedown;
engine->graph.init = nv40_graph_init;
engine->graph.takedown = nv40_graph_takedown;
engine->graph.create_context = nv40_graph_create_context;
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.save_context = nv40_graph_save_context;
engine->fifo.init = nouveau_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.create_context = nv40_fifo_create_context;
engine->fifo.destroy_context = nv40_fifo_destroy_context;
engine->fifo.load_context = nv40_fifo_load_context;
engine->fifo.save_context = nv40_fifo_save_context;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
engine->instmem.init = nv50_instmem_init;
engine->instmem.takedown= nv50_instmem_takedown;
engine->instmem.populate = nv50_instmem_populate;
engine->instmem.clear = nv50_instmem_clear;
engine->instmem.bind = nv50_instmem_bind;
engine->instmem.unbind = nv50_instmem_unbind;
engine->mc.init = nv50_mc_init;
engine->mc.takedown = nv50_mc_takedown;
engine->timer.init = nouveau_stub_init;
engine->timer.takedown = nouveau_stub_takedown;
engine->fb.init = nouveau_stub_init;
engine->fb.takedown = nouveau_stub_takedown;
engine->graph.init = nv50_graph_init;
engine->graph.takedown = nv50_graph_takedown;
engine->graph.create_context = nv50_graph_create_context;
engine->graph.destroy_context = nv50_graph_destroy_context;
engine->graph.load_context = nv50_graph_load_context;
engine->graph.save_context = nv50_graph_save_context;
engine->fifo.init = nv50_fifo_init;
engine->fifo.takedown = nv50_fifo_takedown;
engine->fifo.create_context = nv50_fifo_create_context;
engine->fifo.destroy_context = nv50_fifo_destroy_context;
engine->fifo.load_context = nv50_fifo_load_context;
engine->fifo.save_context = nv50_fifo_save_context;
break;
default:
DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset);
return 1;
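A hedged sketch of how this vtable is consumed (example_channel_init is hypothetical; the member names and hook signatures are the ones installed above). Ordering matters: on NV40 the FIFO context records the graphics context's instance (see RAMFC_WR(GRCTX_INSTANCE, ...) in nv40_fifo.c below), so the graph hook must run first:
/* Hypothetical consumer of the per-chipset vtable filled in above;
 * no chipset checks remain, everything goes through the hooks. */
static int example_channel_init(drm_device_t *dev, int channel)
{
	drm_nouveau_private_t *dev_priv = dev->dev_private;
	nouveau_engine_func_t *engine = &dev_priv->Engine;
	int ret;

	if ((ret = engine->graph.create_context(dev, channel)))
		return ret;
	if ((ret = engine->fifo.create_context(dev, channel)))
		return ret;
	return engine->fifo.load_context(dev, channel);
}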
@ -169,12 +280,13 @@ static int nouveau_card_init(drm_device_t *dev)
ret = nouveau_init_engine_ptrs(dev);
if (ret) return ret;
engine = &dev_priv->Engine;
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
/* Initialise instance memory, must happen before mem_init so we
* know exactly how much VRAM we're able to use for "normal"
* purposes.
*/
ret = nouveau_instmem_init(dev);
ret = engine->instmem.init(dev);
if (ret) return ret;
/* Setup the memory manager */
@ -184,38 +296,59 @@ static int nouveau_card_init(drm_device_t *dev)
/* Parse BIOS tables / Run init tables? */
/* PMC */
ret = engine->Mc.Init(dev);
ret = engine->mc.init(dev);
if (ret) return ret;
/* PTIMER */
ret = engine->Timer.Init(dev);
ret = engine->timer.init(dev);
if (ret) return ret;
/* PFB */
ret = engine->Fb.Init(dev);
ret = engine->fb.init(dev);
if (ret) return ret;
/* PGRAPH */
ret = engine->Graph.Init(dev);
ret = engine->graph.init(dev);
if (ret) return ret;
/* PFIFO */
ret = engine->Fifo.Init(dev);
ret = engine->fifo.init(dev);
if (ret) return ret;
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
return 0;
}
static void nouveau_card_takedown(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nouveau_engine_func_t *engine = &dev_priv->Engine;
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
engine->fifo.takedown(dev);
engine->graph.takedown(dev);
engine->fb.takedown(dev);
engine->timer.takedown(dev);
engine->mc.takedown(dev);
nouveau_gpuobj_takedown(dev);
nouveau_mem_close(dev);
engine->instmem.takedown(dev);
dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
}
}
/* here a client dies, release the stuff that was allocated for its filp */
void nouveau_preclose(drm_device_t * dev, DRMFILE filp)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nouveau_fifo_cleanup(dev, filp);
nouveau_mem_release(filp,dev_priv->fb_heap);
nouveau_mem_release(filp,dev_priv->agp_heap);
nouveau_fifo_cleanup(dev, filp);
nouveau_mem_release(filp,dev_priv->pci_heap);
}
/* first module load, setup the mmio/fb mapping */
@ -235,18 +368,17 @@ int nouveau_firstopen(struct drm_device *dev)
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
drm_nouveau_private_t *dev_priv;
int ret;
if (flags==NV_UNKNOWN)
return DRM_ERR(EINVAL);
dev_priv = drm_alloc(sizeof(drm_nouveau_private_t), DRM_MEM_DRIVER);
dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
if (!dev_priv)
return DRM_ERR(ENOMEM);
memset(dev_priv, 0, sizeof(drm_nouveau_private_t));
dev_priv->card_type=flags&NOUVEAU_FAMILY;
dev_priv->flags=flags&NOUVEAU_FLAGS;
dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
dev->dev_private = (void *)dev_priv;
@ -264,6 +396,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
void nouveau_lastclose(struct drm_device *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nouveau_card_takedown(dev);
if(dev_priv->fb_mtrr>0)
{
drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC);
@ -308,6 +443,15 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS)
case NOUVEAU_GETPARAM_AGP_PHYSICAL:
getparam.value=dev_priv->agp_phys;
break;
case NOUVEAU_GETPARAM_PCI_PHYSICAL:
if (dev->sg)
getparam.value=dev->sg->virtual;
else {
DRM_ERROR("Requested PCIGART address, but no PCIGART was created\n");
return DRM_ERR(EINVAL);
}
break;
case NOUVEAU_GETPARAM_FB_SIZE:
getparam.value=dev_priv->fb_available_size;
break;
@ -338,6 +482,8 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS)
switch (setparam.value) {
case NOUVEAU_MEM_AGP:
case NOUVEAU_MEM_FB:
case NOUVEAU_MEM_PCI:
case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE:
break;
default:
DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n",
@ -366,6 +512,8 @@ void nouveau_wait_for_idle(struct drm_device *dev)
case NV_03:
while(NV_READ(NV03_PGRAPH_STATUS));
break;
case NV_50:
break;
default:
while(NV_READ(NV04_PGRAPH_STATUS));
break;
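These spins have no upper bound, so a wedged PGRAPH hangs the caller forever; nouveau_graph_wait_idle() in the nv30 changes further down bounds the same kind of wait. A sketch of that pattern applied here (the 1000-iteration cap mirrors that function's arbitrary choice, it is not a tuned value):
/* Sketch only: a bounded variant of the NV04_PGRAPH_STATUS spin above. */
static int example_bounded_pgraph_wait(drm_device_t *dev)
{
	drm_nouveau_private_t *dev_priv = dev->dev_private;
	int tv = 1000;

	while (tv-- && NV_READ(NV04_PGRAPH_STATUS))
		;
	return NV_READ(NV04_PGRAPH_STATUS) ? DRM_ERR(EBUSY) : 0;
}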

130
shared-core/nv04_fifo.c Normal file
View File

@ -0,0 +1,130 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
NV04_RAMFC_##offset/4, (val))
#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
NV04_RAMFC_##offset/4)
#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
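To make the shorthand above concrete: the token-pasting in RAMFC_WR/RAMFC_RD resolves the offset name against the NV04_RAMFC_* defines added earlier in this commit. For example:
/* RAMFC_WR(DMA_PUT, chan->pushbuf_base) expands to: */
INSTANCE_WR(chan->ramfc->gpuobj, NV04_RAMFC_DMA_PUT/4, (chan->pushbuf_base));
/* ...and NV04_RAMFC(3) evaluates to dev_priv->ramfc_offset + 3*32, the
 * fourth 32-byte per-channel slot in RAMIN. */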
int
nv04_fifo_create_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int ret;
if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(channel),
NV04_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
NULL, &chan->ramfc)))
return ret;
/* Setup initial state */
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0));
/* enable the fifo dma operation */
NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel));
return 0;
}
void
nv04_fifo_destroy_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel));
if (chan->ramfc)
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
nv04_fifo_load_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp;
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));
tmp = RAMFC_RD(DMA_INSTANCE);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH));
NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE));
NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE));
/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
return 0;
}
int
nv04_fifo_save_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp;
RAMFC_WR(DMA_PUT, NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET, NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE);
RAMFC_WR(DMA_INSTANCE, tmp);
RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1));
return 0;
}

View File

@ -309,7 +309,7 @@ void nouveau_nv04_context_switch(drm_device_t *dev)
for (i = 0; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
for (j = 0; j<nv04_graph_ctx_regs[i].number; j++)
{
dev_priv->fifos[channel_old].pgraph_ctx[index] = NV_READ(nv04_graph_ctx_regs[i].reg+j*4);
dev_priv->fifos[channel_old]->pgraph_ctx[index] = NV_READ(nv04_graph_ctx_regs[i].reg+j*4);
index++;
}
@ -321,7 +321,7 @@ void nouveau_nv04_context_switch(drm_device_t *dev)
for (i = 0; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
for (j = 0; j<nv04_graph_ctx_regs[i].number; j++)
{
NV_WRITE(nv04_graph_ctx_regs[i].reg+j*4, dev_priv->fifos[channel].pgraph_ctx[index]);
NV_WRITE(nv04_graph_ctx_regs[i].reg+j*4, dev_priv->fifos[channel]->pgraph_ctx[index]);
index++;
}
@ -336,14 +336,14 @@ void nouveau_nv04_context_switch(drm_device_t *dev)
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
int nv04_graph_context_create(drm_device_t *dev, int channel) {
int nv04_graph_create_context(drm_device_t *dev, int channel) {
drm_nouveau_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("nv04_graph_context_create %d\n", channel);
memset(dev_priv->fifos[channel].pgraph_ctx, 0, sizeof(dev_priv->fifos[channel].pgraph_ctx));
memset(dev_priv->fifos[channel]->pgraph_ctx, 0, sizeof(dev_priv->fifos[channel]->pgraph_ctx));
//dev_priv->fifos[channel].pgraph_ctx_user = channel << 24;
dev_priv->fifos[channel].pgraph_ctx[0] = 0x0001ffff;
dev_priv->fifos[channel]->pgraph_ctx[0] = 0x0001ffff;
/* is it really needed ??? */
//dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4);
//dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0);
@ -351,6 +351,21 @@ int nv04_graph_context_create(drm_device_t *dev, int channel) {
return 0;
}
void nv04_graph_destroy_context(drm_device_t *dev, int channel)
{
}
int nv04_graph_load_context(drm_device_t *dev, int channel)
{
DRM_ERROR("stub!\n");
return 0;
}
int nv04_graph_save_context(drm_device_t *dev, int channel)
{
DRM_ERROR("stub!\n");
return 0;
}
int nv04_graph_init(drm_device_t *dev) {
drm_nouveau_private_t *dev_priv = dev->dev_private;
@ -364,7 +379,7 @@ int nv04_graph_init(drm_device_t *dev) {
// check the context is big enough
for ( i = 0 ; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
sum+=nv04_graph_ctx_regs[i].number;
if ( sum*4>sizeof(dev_priv->fifos[0].pgraph_ctx) )
if ( sum*4>sizeof(dev_priv->fifos[0]->pgraph_ctx) )
DRM_ERROR("pgraph_ctx too small\n");
NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);

165
shared-core/nv04_instmem.c Normal file
View File

@ -0,0 +1,165 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
static void
nv04_instmem_determine_amount(struct drm_device *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
/* Figure out how much instance memory we need */
switch (dev_priv->card_type) {
case NV_40:
/* We'll want more instance memory than this on some NV4x cards.
* There's a 16MB aperture to play with that maps onto the end
* of vram. For now, only reserve a small piece until we know
* more about what each chipset requires.
*/
dev_priv->ramin_rsvd_vram = (1*1024*1024);
break;
default:
/*XXX: what *are* the limits on <NV40 cards, and does RAMIN
* exist in vram on those cards as well?
*/
dev_priv->ramin_rsvd_vram = (512*1024);
break;
}
DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10);
/* Clear all of it, except the BIOS image that's in the first 64KiB */
for (i=(64*1024); i<dev_priv->ramin_rsvd_vram; i+=4)
NV_WI32(i, 0x00000000);
}
static void
nv04_instmem_configure_fixed_tables(struct drm_device *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
/* FIFO hash table (RAMHT)
* use 4k hash table at RAMIN+0x10000
* TODO: extend the hash table
*/
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
dev_priv->ramht_size);
/* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
dev_priv->ramro_offset = 0x11200;
dev_priv->ramro_size = 512;
DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
dev_priv->ramro_size);
/* FIFO context table (RAMFC)
* NV40 : Not sure exactly how to position RAMFC on some cards,
* 0x30002 seems to position it at RAMIN+0x20000 on these
* cards. RAMFC is 4kb (32 fifos, 128byte entries).
* Others: Position RAMFC at RAMIN+0x11400
*/
switch(dev_priv->card_type)
{
case NV_40:
case NV_44:
dev_priv->ramfc_offset = 0x20000;
dev_priv->ramfc_size = nouveau_fifo_number(dev) *
nouveau_fifo_ctx_size(dev);
break;
case NV_30:
case NV_20:
case NV_17:
case NV_10:
case NV_04:
case NV_03:
default:
dev_priv->ramfc_offset = 0x11400;
dev_priv->ramfc_size = nouveau_fifo_number(dev) *
nouveau_fifo_ctx_size(dev);
break;
}
DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
dev_priv->ramfc_size);
}
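As a sanity check on the comment above: 32 fifos of 128-byte entries is 32 * 128 = 4096 bytes, exactly the "4kb" of RAMFC the NV40 note promises. In both switch arms ramfc_size is simply nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev), i.e. channel count times per-channel context size, so the table scales with whatever those helpers report for the chipset.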
int nv04_instmem_init(struct drm_device *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t offset;
int ret = 0;
nv04_instmem_determine_amount(dev);
nv04_instmem_configure_fixed_tables(dev);
if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
dev_priv->ramht_size,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ALLOW_NO_REFS,
&dev_priv->ramht, NULL)))
return ret;
/* Create a heap to manage RAMIN allocations, we don't allocate
* the space that was reserved for RAMHT/FC/RO.
*/
offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
offset, dev_priv->ramin_rsvd_vram - offset);
if (ret) {
dev_priv->ramin_heap = NULL;
DRM_ERROR("Failed to init RAMIN heap\n");
}
return ret;
}
void
nv04_instmem_takedown(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nouveau_gpuobj_del(dev, &dev_priv->ramht);
}
int
nv04_instmem_populate(drm_device_t *dev, nouveau_gpuobj_t *gpuobj, uint32_t *sz)
{
if (gpuobj->im_backing)
return DRM_ERR(EINVAL);
return 0;
}
void
nv04_instmem_clear(drm_device_t *dev, nouveau_gpuobj_t *gpuobj)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
if (gpuobj && gpuobj->im_backing) {
if (gpuobj->im_bound)
dev_priv->Engine.instmem.unbind(dev, gpuobj);
nouveau_mem_free(dev, gpuobj->im_backing);
gpuobj->im_backing = NULL;
}
}
int
nv04_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj)
{
if (!gpuobj->im_pramin || gpuobj->im_bound)
return DRM_ERR(EINVAL);
gpuobj->im_bound = 1;
return 0;
}
int
nv04_instmem_unbind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj)
{
if (gpuobj->im_bound == 0)
return DRM_ERR(EINVAL);
gpuobj->im_bound = 0;
return 0;
}
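The EINVAL guards above encode an ordering contract: populate before bind, and unbind before clear releases the backing. A hedged sketch of a caller honoring it (example_instmem_cycle is hypothetical; hook names and signatures are the ones installed in nouveau_init_engine_ptrs() earlier):
/* Hypothetical caller, shown only to make the hook ordering explicit. */
static int example_instmem_cycle(drm_device_t *dev, nouveau_gpuobj_t *gpuobj,
				 uint32_t size)
{
	drm_nouveau_private_t *dev_priv = dev->dev_private;
	int ret;

	if ((ret = dev_priv->Engine.instmem.populate(dev, gpuobj, &size)))
		return ret;
	if ((ret = dev_priv->Engine.instmem.bind(dev, gpuobj)))
		return ret;
	/* ... the object is usable here ... */
	dev_priv->Engine.instmem.unbind(dev, gpuobj);
	dev_priv->Engine.instmem.clear(dev, gpuobj);
	return 0;
}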

161
shared-core/nv10_fifo.c Normal file
View File

@ -0,0 +1,161 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
NV10_RAMFC_##offset/4, (val))
#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
NV10_RAMFC_##offset/4)
#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
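Concretely: on an NV17 or later (chipset >= 0x17) each slot is 64 bytes, so channel 3's context sits at dev_priv->ramfc_offset + 3*64; earlier NV1x parts use 32-byte slots, the same size as NV04's.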
int
nv10_fifo_create_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int ret;
if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(channel),
NV10_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
NULL, &chan->ramfc)))
return ret;
/* Fill entries that are seen filled in dumps of nvidia driver just
* after a channel is put into DMA mode
*/
RAMFC_WR(DMA_PUT , chan->pushbuf_base);
RAMFC_WR(DMA_GET , chan->pushbuf_base);
RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0);
/* enable the fifo dma operation */
NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel));
return 0;
}
void
nv10_fifo_destroy_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel));
if (chan->ramfc)
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
nv10_fifo_load_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp;
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT));
NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT));
tmp = RAMFC_RD(DMA_INSTANCE);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , tmp & 0xFFFF);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , tmp >> 16);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , RAMFC_RD(DMA_FETCH));
NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE));
NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE));
if (dev_priv->chipset >= 0x17) {
NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE,
RAMFC_RD(ACQUIRE_VALUE));
NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP,
RAMFC_RD(ACQUIRE_TIMESTAMP));
NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT,
RAMFC_RD(ACQUIRE_TIMEOUT));
NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE,
RAMFC_RD(SEMAPHORE));
NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE,
RAMFC_RD(DMA_SUBROUTINE));
}
/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
return 0;
}
int
nv10_fifo_save_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp;
RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
RAMFC_WR(DMA_INSTANCE , tmp);
RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1));
if (dev_priv->chipset >= 0x17) {
RAMFC_WR(ACQUIRE_VALUE,
NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
RAMFC_WR(ACQUIRE_TIMESTAMP,
NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP));
RAMFC_WR(ACQUIRE_TIMEOUT,
NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
RAMFC_WR(SEMAPHORE,
NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
RAMFC_WR(DMA_SUBROUTINE,
NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
}
return 0;
}

View File

@ -547,7 +547,7 @@ static int nv10_graph_ctx_regs_find_offset(drm_device_t *dev, int reg)
static void restore_ctx_regs(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *fifo = &dev_priv->fifos[channel];
struct nouveau_fifo *fifo = dev_priv->fifos[channel];
int i, j;
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
NV_WRITE(nv10_graph_ctx_regs[i], fifo->pgraph_ctx[i]);
@ -577,10 +577,10 @@ void nouveau_nv10_context_switch(drm_device_t *dev)
// save PGRAPH context
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
dev_priv->fifos[channel_old].pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]);
dev_priv->fifos[channel_old]->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]);
if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
dev_priv->fifos[channel_old].pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]);
dev_priv->fifos[channel_old]->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]);
}
nouveau_wait_for_idle(dev);
@ -611,9 +611,9 @@ void nouveau_nv10_context_switch(drm_device_t *dev)
if (offset > 0) \
fifo->pgraph_ctx[offset] = val; \
} while (0)
int nv10_graph_context_create(drm_device_t *dev, int channel) {
int nv10_graph_create_context(drm_device_t *dev, int channel) {
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *fifo = &dev_priv->fifos[channel];
struct nouveau_fifo *fifo = dev_priv->fifos[channel];
uint32_t tmp, vramsz;
DRM_DEBUG("nv10_graph_context_create %d\n", channel);
@ -640,6 +640,10 @@ int nv10_graph_context_create(drm_device_t *dev, int channel) {
NV_WRITE_CTX(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
NV_WRITE_CTX(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
/* is it really needed ??? */
@ -659,6 +663,21 @@ int nv10_graph_context_create(drm_device_t *dev, int channel) {
return 0;
}
void nv10_graph_destroy_context(drm_device_t *dev, int channel)
{
}
int nv10_graph_load_context(drm_device_t *dev, int channel)
{
DRM_ERROR("stub!\n");
return 0;
}
int nv10_graph_save_context(drm_device_t *dev, int channel)
{
DRM_ERROR("stub!\n");
return 0;
}
int nv10_graph_init(drm_device_t *dev) {
drm_nouveau_private_t *dev_priv = dev->dev_private;

View File

@ -29,28 +29,36 @@
#define NV20_GRCTX_SIZE (3529*4)
int nv20_graph_context_create(drm_device_t *dev, int channel) {
int nv20_graph_create_context(drm_device_t *dev, int channel) {
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
struct nouveau_fifo *chan = &dev_priv->fifos[channel];
struct nouveau_fifo *chan = dev_priv->fifos[channel];
unsigned int ctx_size = NV20_GRCTX_SIZE;
int i;
int ret;
/* Alloc and clear RAMIN to store the context */
chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
if (!chan->ramin_grctx)
return DRM_ERR(ENOMEM);
for (i=0; i<ctx_size; i+=4)
INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramin_grctx)))
return ret;
/* Initialise default context values */
INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */
INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx));
INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, channel<<24); /* CTX_USER */
INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel,
chan->ramin_grctx->instance >> 4);
return 0;
}
void nv20_graph_destroy_context(drm_device_t *dev, int channel) {
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (chan->ramin_grctx)
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, 0);
}
static void nv20_graph_rdi(drm_device_t *dev) {
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
@ -65,40 +73,44 @@ static void nv20_graph_rdi(drm_device_t *dev) {
/* Save current context (from PGRAPH) into the channel's context
*/
static void nv20_graph_context_save_current(drm_device_t *dev, int channel) {
int nv20_graph_save_context(drm_device_t *dev, int channel) {
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t instance;
instance = INSTANCE_RD(dev_priv->ctx_table, channel);
instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel);
if (!instance) {
return;
return DRM_ERR(EINVAL);
}
if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx))
DRM_ERROR("nv20_graph_context_save_current : bad instance\n");
if (instance != (chan->ramin_grctx->instance >> 4))
DRM_ERROR("nv20_graph_save_context : bad instance\n");
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 2 /* save ctx */);
return 0;
}
/* Restore the context for a specific channel into PGRAPH
*/
static void nv20_graph_context_restore(drm_device_t *dev, int channel) {
int nv20_graph_load_context(drm_device_t *dev, int channel) {
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t instance;
instance = INSTANCE_RD(dev_priv->ctx_table, channel);
instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel);
if (!instance) {
return;
return DRM_ERR(EINVAL);
}
if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx))
DRM_ERROR("nv20_graph_context_restore_current : bad instance\n");
if (instance != (chan->ramin_grctx->instance >> 4))
DRM_ERROR("nv20_graph_load_context_current : bad instance\n");
NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */);
return 0;
}
void nouveau_nv20_context_switch(drm_device_t *dev)
@ -113,13 +125,13 @@ void nouveau_nv20_context_switch(drm_device_t *dev)
NV_WRITE(NV04_PGRAPH_FIFO,0x0);
nv20_graph_context_save_current(dev, channel_old);
nv20_graph_save_context(dev, channel_old);
nouveau_wait_for_idle(dev);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
nv20_graph_context_restore(dev, channel);
nv20_graph_load_context(dev, channel);
nouveau_wait_for_idle(dev);
@ -135,8 +147,8 @@ void nouveau_nv20_context_switch(drm_device_t *dev)
int nv20_graph_init(drm_device_t *dev) {
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
int i;
uint32_t tmp, vramsz;
int ret, i;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
@ -145,14 +157,14 @@ int nv20_graph_init(drm_device_t *dev) {
/* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4;
dev_priv->ctx_table = nouveau_instmem_alloc(dev, dev_priv->ctx_table_size, 4);
if (!dev_priv->ctx_table)
return DRM_ERR(ENOMEM);
if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0,
dev_priv->ctx_table_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->ctx_table)))
return ret;
for (i=0; i< dev_priv->ctx_table_size; i+=4)
INSTANCE_WR(dev_priv->ctx_table, i/4, 0x00000000);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, nouveau_chip_instance_get(dev, dev_priv->ctx_table));
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE,
dev_priv->ctx_table->instance >> 4);
//XXX need to be done and save/restore for each fifo ???
nv20_graph_rdi(dev);

View File

@ -16,7 +16,7 @@
* contexts are taken from dumps just after the 3D object is
* created.
*/
static void nv30_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
static void nv30_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
@ -100,14 +100,14 @@ static void nv30_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
int nv30_graph_context_create(drm_device_t *dev, int channel)
int nv30_graph_create_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
struct nouveau_fifo *chan = &dev_priv->fifos[channel];
void (*ctx_init)(drm_device_t *, struct mem_block *);
struct nouveau_fifo *chan = dev_priv->fifos[channel];
void (*ctx_init)(drm_device_t *, nouveau_gpuobj_t *);
unsigned int ctx_size;
int i;
int ret;
switch (dev_priv->chipset) {
default:
@ -116,28 +116,91 @@ int nv30_graph_context_create(drm_device_t *dev, int channel)
break;
}
/* Alloc and clear RAMIN to store the context */
chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
if (!chan->ramin_grctx)
return DRM_ERR(ENOMEM);
for (i=0; i<ctx_size; i+=4)
INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramin_grctx)))
return ret;
/* Initialise default context values */
ctx_init(dev, chan->ramin_grctx);
ctx_init(dev, chan->ramin_grctx->gpuobj);
INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */
INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx));
INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, channel<<24); /* CTX_USER */
INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel,
chan->ramin_grctx->instance >> 4);
return 0;
}
void nv30_graph_destroy_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (chan->ramin_grctx)
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, 0);
}
static int
nouveau_graph_wait_idle(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int tv = 1000;
while (tv--) {
if (NV_READ(0x400700) == 0)
break;
}
if (NV_READ(0x400700)) {
DRM_ERROR("timeout!\n");
return DRM_ERR(EBUSY);
}
return 0;
}
int nv30_graph_load_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst;
if (!chan->ramin_grctx)
return DRM_ERR(EINVAL);
inst = chan->ramin_grctx->instance >> 4;
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
return nouveau_graph_wait_idle(dev);
}
int nv30_graph_save_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst;
if (!chan->ramin_grctx)
return DRM_ERR(EINVAL);
inst = chan->ramin_grctx->instance >> 4;
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
return nouveau_graph_wait_idle(dev);
}
int nv30_graph_init(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
uint32_t vramsz, tmp;
int i;
int ret, i;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
@ -146,14 +209,14 @@ int nv30_graph_init(drm_device_t *dev)
/* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4;
dev_priv->ctx_table = nouveau_instmem_alloc(dev, dev_priv->ctx_table_size, 4);
if (!dev_priv->ctx_table)
return DRM_ERR(ENOMEM);
if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0,
dev_priv->ctx_table_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->ctx_table)))
return ret;
for (i=0; i< dev_priv->ctx_table_size; i+=4)
INSTANCE_WR(dev_priv->ctx_table, i/4, 0x00000000);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, nouveau_chip_instance_get(dev, dev_priv->ctx_table));
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE,
dev_priv->ctx_table->instance >> 4);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);

195
shared-core/nv40_fifo.c Normal file
View File

@ -0,0 +1,195 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
NV40_RAMFC_##offset/4, (val))
#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
NV40_RAMFC_##offset/4)
#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE))
#define NV40_RAMFC__SIZE 128
int
nv40_fifo_create_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int ret;
if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(channel),
NV40_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
NULL, &chan->ramfc)))
return ret;
/* Fill entries that are seen filled in dumps of nvidia driver just
* after a channel is put into DMA mode
*/
RAMFC_WR(DMA_PUT , chan->pushbuf_base);
RAMFC_WR(DMA_GET , chan->pushbuf_base);
RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
RAMFC_WR(DMA_SUBROUTINE, 0);
RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4);
RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
/* enable the fifo dma operation */
NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel));
return 0;
}
void
nv40_fifo_destroy_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel));
if (chan->ramfc)
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
nv40_fifo_load_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp, tmp2;
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT));
NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , RAMFC_RD(DMA_INSTANCE));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE));
/* No idea what 0x2058 is.. */
tmp = RAMFC_RD(DMA_FETCH);
tmp2 = NV_READ(0x2058) & 0xFFF;
tmp2 |= (tmp & 0x30000000);
NV_WRITE(0x2058, tmp2);
tmp &= ~0x30000000;
NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , tmp);
NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE));
NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE));
NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE));
NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP));
NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT));
NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE));
NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE));
NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE , RAMFC_RD(GRCTX_INSTANCE));
NV_WRITE(0x32e4, RAMFC_RD(UNK_40));
/* NVIDIA does this next line twice... */
NV_WRITE(0x32e8, RAMFC_RD(UNK_44));
NV_WRITE(0x2088, RAMFC_RD(UNK_4C));
NV_WRITE(0x3300, RAMFC_RD(UNK_50));
/* not sure what part is PUT, and which is GET.. never seen a non-zero
* value appear in a mmio-trace yet..
*/
#if 0
tmp = NV_READ(UNK_84);
NV_WRITE(NV_PFIFO_CACHE1_GET, tmp ???);
NV_WRITE(NV_PFIFO_CACHE1_PUT, tmp ???);
#endif
/* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
tmp = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF;
NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp);
/* Set channel active, and in DMA mode */
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | channel);
/* Reset DMA_CTL_AT_INFO to INVALID */
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
return 0;
}
int
nv40_fifo_save_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp;
RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE));
RAMFC_WR(DMA_DCOUNT , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT));
RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH);
tmp |= NV_READ(0x2058) & 0x30000000;
RAMFC_WR(DMA_FETCH , tmp);
RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1));
RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
RAMFC_WR(ACQUIRE_TIMESTAMP, tmp);
RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
/* NVIDIA reads 0x3228 first, then writes DMA_GET here.. maybe something
* more involved depending on the value of 0x3228?
*/
RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE));
/* No idea what the below is for exactly, ripped from a mmio-trace */
RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4));
/* NVIDIA does this next line twice.. bug? */
RAMFC_WR(UNK_44 , NV_READ(0x32e8));
RAMFC_WR(UNK_4C , NV_READ(0x2088));
RAMFC_WR(UNK_50 , NV_READ(0x3300));
#if 0 /* no real idea which is PUT/GET in UNK_48.. */
tmp = NV_READ(NV04_PFIFO_CACHE1_GET);
tmp |= (NV_READ(NV04_PFIFO_CACHE1_PUT) << 16);
RAMFC_WR(UNK_48 , tmp);
#endif
return 0;
}

View File

@ -1,7 +1,32 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
/* The sizes are taken from the difference between the start of two
* grctx addresses while running the nvidia driver. Probably slightly
@ -11,7 +36,9 @@
#define NV40_GRCTX_SIZE (175*1024)
#define NV43_GRCTX_SIZE (70*1024)
#define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */
#define NV49_GRCTX_SIZE (164640)
#define NV4A_GRCTX_SIZE (64*1024)
#define NV4B_GRCTX_SIZE (164640)
#define NV4C_GRCTX_SIZE (25*1024)
#define NV4E_GRCTX_SIZE (25*1024)
@ -19,13 +46,14 @@
* contexts are taken from dumps just after the 3D object is
* created.
*/
static void nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
static void
nv40_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
/* Always has the "instance address" of itself at offset 0 */
INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
/* unknown */
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
@ -160,12 +188,12 @@ static void nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
static void
nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
nv43_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
@ -275,12 +303,13 @@ nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
INSTANCE_WR(ctx, i/4, 0x3f800000);
};
static void nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
static void
nv46_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x0004c/4, 0x00000001);
@ -425,12 +454,236 @@ static void nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
INSTANCE_WR(ctx, i/4, 0x3f800000);
}
static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
static void
nv49_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00010/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00014/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00018/4, 0x0000c040);
INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00020/4, 0x0000c040);
INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x000d0/4, 0x00000001);
INSTANCE_WR(ctx, 0x001bc/4, 0x20010001);
INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00);
INSTANCE_WR(ctx, 0x001c8/4, 0x02008821);
INSTANCE_WR(ctx, 0x00218/4, 0x00000040);
INSTANCE_WR(ctx, 0x0021c/4, 0x00000040);
INSTANCE_WR(ctx, 0x00220/4, 0x00000040);
INSTANCE_WR(ctx, 0x00228/4, 0x00000040);
INSTANCE_WR(ctx, 0x00234/4, 0x80000000);
INSTANCE_WR(ctx, 0x00238/4, 0x80000000);
INSTANCE_WR(ctx, 0x0023c/4, 0x80000000);
INSTANCE_WR(ctx, 0x00240/4, 0x80000000);
INSTANCE_WR(ctx, 0x00244/4, 0x80000000);
INSTANCE_WR(ctx, 0x00248/4, 0x80000000);
INSTANCE_WR(ctx, 0x0024c/4, 0x80000000);
INSTANCE_WR(ctx, 0x00250/4, 0x80000000);
INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c);
INSTANCE_WR(ctx, 0x003e0/4, 0x00040000);
INSTANCE_WR(ctx, 0x003f0/4, 0x55555555);
INSTANCE_WR(ctx, 0x003f4/4, 0x55555555);
INSTANCE_WR(ctx, 0x003f8/4, 0x55555555);
INSTANCE_WR(ctx, 0x003fc/4, 0x55555555);
INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
INSTANCE_WR(ctx, 0x0043c/4, 0x00001010);
INSTANCE_WR(ctx, 0x00460/4, 0x00000111);
INSTANCE_WR(ctx, 0x00464/4, 0x00000111);
INSTANCE_WR(ctx, 0x00468/4, 0x00000111);
INSTANCE_WR(ctx, 0x0046c/4, 0x00000111);
INSTANCE_WR(ctx, 0x00470/4, 0x00000111);
INSTANCE_WR(ctx, 0x00474/4, 0x00000111);
INSTANCE_WR(ctx, 0x00478/4, 0x00000111);
INSTANCE_WR(ctx, 0x0047c/4, 0x00000111);
INSTANCE_WR(ctx, 0x00480/4, 0x00000111);
INSTANCE_WR(ctx, 0x00484/4, 0x00000111);
INSTANCE_WR(ctx, 0x00488/4, 0x00000111);
INSTANCE_WR(ctx, 0x0048c/4, 0x00000111);
INSTANCE_WR(ctx, 0x00490/4, 0x00000111);
INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
INSTANCE_WR(ctx, 0x00498/4, 0x00000111);
INSTANCE_WR(ctx, 0x0049c/4, 0x00000111);
INSTANCE_WR(ctx, 0x004f4/4, 0x00000111);
INSTANCE_WR(ctx, 0x004f8/4, 0x00080060);
INSTANCE_WR(ctx, 0x00514/4, 0x00000080);
INSTANCE_WR(ctx, 0x00518/4, 0xffff0000);
INSTANCE_WR(ctx, 0x0051c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00530/4, 0x46400000);
INSTANCE_WR(ctx, 0x00540/4, 0xffff0000);
INSTANCE_WR(ctx, 0x00544/4, 0x88888888);
INSTANCE_WR(ctx, 0x00548/4, 0x88888888);
INSTANCE_WR(ctx, 0x0054c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00550/4, 0x88888888);
INSTANCE_WR(ctx, 0x00554/4, 0x88888888);
INSTANCE_WR(ctx, 0x00558/4, 0x88888888);
INSTANCE_WR(ctx, 0x0055c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00560/4, 0x88888888);
INSTANCE_WR(ctx, 0x00564/4, 0x88888888);
INSTANCE_WR(ctx, 0x00568/4, 0x88888888);
INSTANCE_WR(ctx, 0x0056c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00570/4, 0x88888888);
INSTANCE_WR(ctx, 0x00574/4, 0x88888888);
INSTANCE_WR(ctx, 0x00578/4, 0x88888888);
INSTANCE_WR(ctx, 0x0057c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00580/4, 0x88888888);
INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000);
INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000);
INSTANCE_WR(ctx, 0x005a0/4, 0x00011100);
INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff);
INSTANCE_WR(ctx, 0x0062c/4, 0x30201000);
INSTANCE_WR(ctx, 0x00630/4, 0x70605040);
INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888);
INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8);
INSTANCE_WR(ctx, 0x0064c/4, 0x40100000);
INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6);
INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699);
INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98);
INSTANCE_WR(ctx, 0x006a8/4, 0x00000098);
INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000);
INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000);
INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00);
for (i=0x00750; i<=0x0078c; i+=4)
INSTANCE_WR(ctx, i/4, 0x00018488);
for (i=0x00790; i<=0x007cc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00028202);
for (i=0x00810; i<=0x0084c; i+=4)
INSTANCE_WR(ctx, i/4, 0x0000aae4);
for (i=0x00850; i<=0x0088c; i+=4)
INSTANCE_WR(ctx, i/4, 0x01012000);
for (i=0x00890; i<=0x008cc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00080008);
for (i=0x00910; i<=0x0094c; i+=4)
INSTANCE_WR(ctx, i/4, 0x00100008);
for (i=0x009a0; i<=0x009ac; i+=4)
INSTANCE_WR(ctx, i/4, 0x0001bc80);
for (i=0x009b0; i<=0x009bc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00000202);
for (i=0x009d0; i<=0x009dc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00000008);
for (i=0x009f0; i<=0x009fc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00080008);
INSTANCE_WR(ctx, 0x00a10/4, 0x00000002);
INSTANCE_WR(ctx, 0x00a44/4, 0x00000421);
INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3);
INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200);
INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff);
INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00);
INSTANCE_WR(ctx, 0x00a68/4, 0x00040000);
INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100);
INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00b70/4, 0x00001001);
INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003);
INSTANCE_WR(ctx, 0x00b80/4, 0x00888001);
INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c54/4, 0x00000005);
INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c80/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c84/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c88/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c90/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c94/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c98/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001);
INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001);
INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000);
for(i=0x030a0; i<=0x03118; i+=8)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x098a0; i<=0x0ba90; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x0baa0; i<=0x0be90; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x0e2e0; i<=0x0fff0; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x10008; i<=0x104d0; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x104e0; i<=0x108d0; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x12d20; i<=0x14f10; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x14f20; i<=0x15310; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x17760; i<=0x19950; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x19960; i<=0x19d50; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x1c1a0; i<=0x1e390; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x1e3a0; i<=0x1e790; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x20be0; i<=0x22dd0; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x22de0; i<=0x231d0; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
}
static void
nv4a_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
@ -541,13 +794,228 @@ static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
INSTANCE_WR(ctx, i/4, 0x3f800000);
}
static void nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
static void
nv4b_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00010/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00014/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00018/4, 0x0000c040);
INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00020/4, 0x0000c040);
INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x000d0/4, 0x00000001);
INSTANCE_WR(ctx, 0x001bc/4, 0x20010001);
INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00);
INSTANCE_WR(ctx, 0x001c8/4, 0x02008821);
INSTANCE_WR(ctx, 0x00218/4, 0x00000040);
INSTANCE_WR(ctx, 0x0021c/4, 0x00000040);
INSTANCE_WR(ctx, 0x00220/4, 0x00000040);
INSTANCE_WR(ctx, 0x00228/4, 0x00000040);
INSTANCE_WR(ctx, 0x00234/4, 0x80000000);
INSTANCE_WR(ctx, 0x00238/4, 0x80000000);
INSTANCE_WR(ctx, 0x0023c/4, 0x80000000);
INSTANCE_WR(ctx, 0x00240/4, 0x80000000);
INSTANCE_WR(ctx, 0x00244/4, 0x80000000);
INSTANCE_WR(ctx, 0x00248/4, 0x80000000);
INSTANCE_WR(ctx, 0x0024c/4, 0x80000000);
INSTANCE_WR(ctx, 0x00250/4, 0x80000000);
INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c);
INSTANCE_WR(ctx, 0x003e0/4, 0x00040000);
INSTANCE_WR(ctx, 0x003f0/4, 0x55555555);
INSTANCE_WR(ctx, 0x003f4/4, 0x55555555);
INSTANCE_WR(ctx, 0x003f8/4, 0x55555555);
INSTANCE_WR(ctx, 0x003fc/4, 0x55555555);
INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
INSTANCE_WR(ctx, 0x0043c/4, 0x00001010);
INSTANCE_WR(ctx, 0x00460/4, 0x00000111);
INSTANCE_WR(ctx, 0x00464/4, 0x00000111);
INSTANCE_WR(ctx, 0x00468/4, 0x00000111);
INSTANCE_WR(ctx, 0x0046c/4, 0x00000111);
INSTANCE_WR(ctx, 0x00470/4, 0x00000111);
INSTANCE_WR(ctx, 0x00474/4, 0x00000111);
INSTANCE_WR(ctx, 0x00478/4, 0x00000111);
INSTANCE_WR(ctx, 0x0047c/4, 0x00000111);
INSTANCE_WR(ctx, 0x00480/4, 0x00000111);
INSTANCE_WR(ctx, 0x00484/4, 0x00000111);
INSTANCE_WR(ctx, 0x00488/4, 0x00000111);
INSTANCE_WR(ctx, 0x0048c/4, 0x00000111);
INSTANCE_WR(ctx, 0x00490/4, 0x00000111);
INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
INSTANCE_WR(ctx, 0x00498/4, 0x00000111);
INSTANCE_WR(ctx, 0x0049c/4, 0x00000111);
INSTANCE_WR(ctx, 0x004f4/4, 0x00000111);
INSTANCE_WR(ctx, 0x004f8/4, 0x00080060);
INSTANCE_WR(ctx, 0x00514/4, 0x00000080);
INSTANCE_WR(ctx, 0x00518/4, 0xffff0000);
INSTANCE_WR(ctx, 0x0051c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00530/4, 0x46400000);
INSTANCE_WR(ctx, 0x00540/4, 0xffff0000);
INSTANCE_WR(ctx, 0x00544/4, 0x88888888);
INSTANCE_WR(ctx, 0x00548/4, 0x88888888);
INSTANCE_WR(ctx, 0x0054c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00550/4, 0x88888888);
INSTANCE_WR(ctx, 0x00554/4, 0x88888888);
INSTANCE_WR(ctx, 0x00558/4, 0x88888888);
INSTANCE_WR(ctx, 0x0055c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00560/4, 0x88888888);
INSTANCE_WR(ctx, 0x00564/4, 0x88888888);
INSTANCE_WR(ctx, 0x00568/4, 0x88888888);
INSTANCE_WR(ctx, 0x0056c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00570/4, 0x88888888);
INSTANCE_WR(ctx, 0x00574/4, 0x88888888);
INSTANCE_WR(ctx, 0x00578/4, 0x88888888);
INSTANCE_WR(ctx, 0x0057c/4, 0x88888888);
INSTANCE_WR(ctx, 0x00580/4, 0x88888888);
INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000);
INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000);
INSTANCE_WR(ctx, 0x005a0/4, 0x00011100);
INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000);
INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff);
INSTANCE_WR(ctx, 0x0062c/4, 0x30201000);
INSTANCE_WR(ctx, 0x00630/4, 0x70605040);
INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888);
INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8);
INSTANCE_WR(ctx, 0x0064c/4, 0x40100000);
INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6);
INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699);
INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98);
INSTANCE_WR(ctx, 0x006a8/4, 0x00000098);
INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000);
INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000);
INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00);
for (i=0x00750; i<=0x0078c; i+=4)
INSTANCE_WR(ctx, i/4, 0x00018488);
for (i=0x00790; i<=0x007cc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00028202);
for (i=0x00810; i<=0x0084c; i+=4)
INSTANCE_WR(ctx, i/4, 0x0000aae4);
for (i=0x00850; i<=0x0088c; i+=4)
INSTANCE_WR(ctx, i/4, 0x01012000);
for (i=0x00890; i<=0x008cc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00080008);
for (i=0x00910; i<=0x0094c; i+=4)
INSTANCE_WR(ctx, i/4, 0x00100008);
for (i=0x009a0; i<=0x009ac; i+=4)
INSTANCE_WR(ctx, i/4, 0x0001bc80);
for (i=0x009b0; i<=0x009bc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00000202);
for (i=0x009d0; i<=0x009dc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00000008);
for (i=0x009f0; i<=0x009fc; i+=4)
INSTANCE_WR(ctx, i/4, 0x00080008);
INSTANCE_WR(ctx, 0x00a10/4, 0x00000002);
INSTANCE_WR(ctx, 0x00a44/4, 0x00000421);
INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3);
INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200);
INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff);
INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00);
INSTANCE_WR(ctx, 0x00a68/4, 0x00040000);
INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100);
INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00b70/4, 0x00001001);
INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003);
INSTANCE_WR(ctx, 0x00b80/4, 0x00888001);
INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff);
INSTANCE_WR(ctx, 0x00c54/4, 0x00000005);
INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c80/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c84/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c88/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c90/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c94/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c98/4, 0x00005555);
INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001);
INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001);
INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001);
INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000);
for(i=0x030a0; i<=0x03118; i+=8)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x098a0; i<=0x0ba90; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x0baa0; i<=0x0be90; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x0e2e0; i<=0x0fff0; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x10008; i<=0x104d0; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x104e0; i<=0x108d0; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x12d20; i<=0x14f10; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x14f20; i<=0x15310; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
for(i=0x17760; i<=0x19950; i+=24)
INSTANCE_WR(ctx, i/4, 0x00000001);
for(i=0x19960; i<=0x19d50; i+=16)
INSTANCE_WR(ctx, i/4, 0x3f800000);
}
static void
nv4c_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
@ -648,12 +1116,13 @@ static void nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
INSTANCE_WR(ctx, i/4, 0x3f800000);
}
static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
static void
nv4e_graph_context_init(drm_device_t *dev, nouveau_gpuobj_t *ctx)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
int i;
INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
@ -755,14 +1224,14 @@ static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
int
nv40_graph_context_create(drm_device_t *dev, int channel)
nv40_graph_create_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
struct nouveau_fifo *chan = &dev_priv->fifos[channel];
void (*ctx_init)(drm_device_t *, struct mem_block *);
struct nouveau_fifo *chan = dev_priv->fifos[channel];
void (*ctx_init)(drm_device_t *, nouveau_gpuobj_t *);
unsigned int ctx_size;
int i;
int ret;
switch (dev_priv->chipset) {
case 0x40:
@ -777,10 +1246,18 @@ nv40_graph_context_create(drm_device_t *dev, int channel)
ctx_size = NV46_GRCTX_SIZE;
ctx_init = nv46_graph_context_init;
break;
case 0x49:
ctx_size = NV49_GRCTX_SIZE;
ctx_init = nv49_graph_context_init;
break;
case 0x4a:
ctx_size = NV4A_GRCTX_SIZE;
ctx_init = nv4a_graph_context_init;
break;
case 0x4b:
ctx_size = NV4B_GRCTX_SIZE;
ctx_init = nv4b_graph_context_init;
break;
case 0x4c:
ctx_size = NV4C_GRCTX_SIZE;
ctx_init = nv4c_graph_context_init;
@ -795,15 +1272,53 @@ nv40_graph_context_create(drm_device_t *dev, int channel)
break;
}
/* Alloc and clear RAMIN to store the context */
chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
if (!chan->ramin_grctx)
return DRM_ERR(ENOMEM);
for (i=0; i<ctx_size; i+=4)
INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramin_grctx)))
return ret;
/* Initialise default context values */
ctx_init(dev, chan->ramin_grctx);
ctx_init(dev, chan->ramin_grctx->gpuobj);
return 0;
}
void
nv40_graph_destroy_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (chan->ramin_grctx)
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
}
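/* The transfer handshake below is reconstructed from observed behaviour
* rather than documentation: point NV20_PGRAPH_CHANNEL_CTX_POINTER at the
* context's instance, pick a direction via NV40_PGRAPH_CTXCTL_0310
* (XFER_SAVE or XFER_LOAD), kick the transfer through CTXCTL_0304, then
* poll CTXCTL_030C until it reads zero, giving up after 1000 reads.
*/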
static int
nv40_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t old_cp, tv = 1000;
int i;
old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(NV40_PGRAPH_CTXCTL_0310,
save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
NV40_PGRAPH_CTXCTL_0310_XFER_LOAD);
NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);
for (i = 0; i < tv; i++) {
if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
break;
}
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
if (i == tv) {
DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save);
DRM_ERROR("0x40030C = 0x%08x\n",
NV_READ(NV40_PGRAPH_CTXCTL_030C));
return DRM_ERR(EBUSY);
}
return 0;
}
@ -811,86 +1326,54 @@ nv40_graph_context_create(drm_device_t *dev, int channel)
/* Save current context (from PGRAPH) into the channel's context
*XXX: fails sometimes, not sure why..
*/
void
nv40_graph_context_save_current(drm_device_t *dev)
int
nv40_graph_save_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
uint32_t instance;
int i;
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst;
NV_WRITE(NV04_PGRAPH_FIFO, 0);
if (!chan->ramin_grctx)
return DRM_ERR(EINVAL);
inst = chan->ramin_grctx->instance >> 4;
instance = NV_READ(0x40032C) & 0xFFFFF;
if (!instance) {
NV_WRITE(NV04_PGRAPH_FIFO, 1);
return;
}
NV_WRITE(0x400784, instance);
NV_WRITE(0x400310, NV_READ(0x400310) | 0x20);
NV_WRITE(0x400304, 1);
/* just in case, we don't want to spin in-kernel forever */
for (i=0; i<1000; i++) {
if (NV_READ(0x40030C) == 0)
break;
}
if (i==1000) {
DRM_ERROR("failed to save current grctx to ramin\n");
DRM_ERROR("instance = 0x%08x\n", NV_READ(0x40032C));
DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C));
NV_WRITE(NV04_PGRAPH_FIFO, 1);
return;
}
NV_WRITE(NV04_PGRAPH_FIFO, 1);
return nv40_graph_transfer_context(dev, inst, 1);
}
/* Restore the context for a specific channel into PGRAPH
* XXX: fails sometimes.. not sure why
*/
void
nv40_graph_context_restore(drm_device_t *dev, int channel)
int
nv40_graph_load_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv =
(drm_nouveau_private_t *)dev->dev_private;
struct nouveau_fifo *chan = &dev_priv->fifos[channel];
uint32_t instance;
int i;
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst;
int ret;
instance = nouveau_chip_instance_get(dev, chan->ramin_grctx);
NV_WRITE(NV04_PGRAPH_FIFO, 0);
NV_WRITE(0x400784, instance);
NV_WRITE(0x400310, NV_READ(0x400310) | 0x40);
NV_WRITE(0x400304, 1);
/* just in case, we don't want to spin in-kernel forever */
for (i=0; i<1000; i++) {
if (NV_READ(0x40030C) == 0)
break;
}
if (i==1000) {
DRM_ERROR("failed to restore grctx for ch%d to PGRAPH\n",
channel);
DRM_ERROR("instance = 0x%08x\n", instance);
DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C));
NV_WRITE(NV04_PGRAPH_FIFO, 1);
return;
}
if (!chan->ramin_grctx)
return DRM_ERR(EINVAL);
inst = chan->ramin_grctx->instance >> 4;
ret = nv40_graph_transfer_context(dev, inst, 0);
if (ret)
return ret;
/* 0x40032C, no idea of its exact function. Could simply be a
* record of the currently active PGRAPH context. It's currently
* unknown as to what bit 24 does. The nv ddx has it set, so we will
* set it here too.
*/
NV_WRITE(0x40032C, instance | 0x01000000);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(NV40_PGRAPH_CTXCTL_CUR,
(inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) |
NV40_PGRAPH_CTXCTL_CUR_LOADED);
/* 0x32E0 records the instance address of the active FIFO's PGRAPH
* context. If at any time this doesn't match 0x40032C, you will
* receive PGRAPH_INTR_CONTEXT_SWITCH
*/
NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, instance);
NV_WRITE(NV04_PGRAPH_FIFO, 1);
NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst);
return 0;
}
/* Some voodoo that makes context switching work without the binary driver
@ -1007,6 +1490,39 @@ static uint32_t nv46_ctx_voodoo[] = {
0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
/* This ucode is used for both NV49 and NV4B. */
static uint32_t nv49_4b_ctx_voodoo[] ={
0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020,
0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000,
0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e,
0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000,
0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a,
0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210,
0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280,
0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140,
0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118,
0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d,
0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700,
0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800,
0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00,
0x00104a19, 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e,
0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600,
0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a,
0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88,
0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 0x00407c0f,
0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280,
0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68,
0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e,
0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b,
0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e,
0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60,
0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e,
0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005,
0x00700006, 0x0060000e, ~0
};
static uint32_t nv4a_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06,
@ -1100,7 +1616,9 @@ nv40_graph_init(drm_device_t *dev)
case 0x40: ctx_voodoo = nv40_ctx_voodoo; break;
case 0x43: ctx_voodoo = nv43_ctx_voodoo; break;
case 0x46: ctx_voodoo = nv46_ctx_voodoo; break;
case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break;
case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break;
case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break;
case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break;
default:
DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n",
@ -1114,15 +1632,15 @@ nv40_graph_init(drm_device_t *dev)
DRM_DEBUG("Loading context-switch voodoo\n");
i = 0;
NV_WRITE(0x400324, 0);
NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
while (ctx_voodoo[i] != ~0) {
NV_WRITE(0x400328, ctx_voodoo[i]);
NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_voodoo[i]);
i++;
}
}
/* No context present currently */
NV_WRITE(0x40032C, 0x00000000);
NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);

shared-core/nv50_fifo.c (new file, 334 lines)

@ -0,0 +1,334 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
typedef struct {
nouveau_gpuobj_ref_t *thingo;
nouveau_gpuobj_ref_t *dummyctx;
} nv50_fifo_priv;
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
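/* "thingo" appears to be a playlist of enabled channels for the PFIFO
* context switcher: two header entries (0x7e) followed by the IDs of all
* allocated FIFOs. 0x32f4 is then pointed at its instance (>>12) and
* 0x32ec given the entry count; these register roles are guesses based
* purely on this code.
*/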
static void
nv50_fifo_init_thingo(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
nouveau_gpuobj_ref_t *thingo = priv->thingo;
int i, fi=2;
DRM_DEBUG("\n");
INSTANCE_WR(thingo->gpuobj, 0, 0x7e);
INSTANCE_WR(thingo->gpuobj, 1, 0x7e);
for (i = 0; i < NV_MAX_FIFO_NUMBER; i++) {
if (dev_priv->fifos[i]) {
INSTANCE_WR(thingo->gpuobj, fi, i);
fi++;
}
}
NV_WRITE(0x32f4, thingo->instance >> 12);
NV_WRITE(0x32ec, fi);
NV_WRITE(0x2500, 0x101);
}
static int
nv50_fifo_channel_enable(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
DRM_DEBUG("ch%d\n", channel);
if (IS_G80) {
if (!chan->ramin)
return DRM_ERR(EINVAL);
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
(chan->ramin->instance >> 12) |
NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
} else {
if (!chan->ramfc)
return DRM_ERR(EINVAL);
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
(chan->ramfc->instance >> 8) |
NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
}
nv50_fifo_init_thingo(dev);
return 0;
}
static void
nv50_fifo_channel_disable(drm_device_t *dev, int channel, int nt)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("ch%d, nt=%d\n", channel, nt);
if (IS_G80) {
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80);
} else {
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84);
}
if (!nt) nv50_fifo_init_thingo(dev);
}
static void
nv50_fifo_init_reset(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t pmc_e;
DRM_DEBUG("\n");
pmc_e = NV_READ(NV03_PMC_ENABLE);
NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PFIFO);
pmc_e = NV_READ(NV03_PMC_ENABLE);
NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PFIFO);
}
static void
nv50_fifo_init_context_table(drm_device_t *dev)
{
int i;
DRM_DEBUG("\n");
for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++)
nv50_fifo_channel_disable(dev, i, 1);
nv50_fifo_init_thingo(dev);
}
static void
nv50_fifo_init_regs__nv(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
NV_WRITE(0x250c, 0x6f3cfc34);
}
static int
nv50_fifo_init_regs(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
int ret;
DRM_DEBUG("\n");
if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, 0x1000,
0x1000,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
&priv->dummyctx)))
return ret;
NV_WRITE(0x2500, 0);
NV_WRITE(0x3250, 0);
NV_WRITE(0x3220, 0);
NV_WRITE(0x3204, 0);
NV_WRITE(0x3210, 0);
NV_WRITE(0x3270, 0);
if (IS_G80) {
NV_WRITE(0x2600, (priv->dummyctx->instance>>8) | (1<<31));
NV_WRITE(0x27fc, (priv->dummyctx->instance>>8) | (1<<31));
} else {
NV_WRITE(0x2600, (priv->dummyctx->instance>>12) | (1<<31));
NV_WRITE(0x27fc, (priv->dummyctx->instance>>12) | (1<<31));
}
return 0;
}
int
nv50_fifo_init(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nv50_fifo_priv *priv;
int ret;
DRM_DEBUG("\n");
priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
if (!priv)
return DRM_ERR(ENOMEM);
dev_priv->Engine.fifo.priv = priv;
nv50_fifo_init_reset(dev);
if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, (128+2)*4, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC,
&priv->thingo))) {
DRM_ERROR("error creating thingo: %d\n", ret);
return ret;
}
nv50_fifo_init_context_table(dev);
nv50_fifo_init_regs__nv(dev);
if ((ret = nv50_fifo_init_regs(dev)))
return ret;
return 0;
}
void
nv50_fifo_takedown(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
DRM_DEBUG("\n");
if (!priv)
return;
nouveau_gpuobj_ref_del(dev, &priv->thingo);
nouveau_gpuobj_ref_del(dev, &priv->dummyctx);
dev_priv->Engine.fifo.priv = NULL;
drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
}
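/* RAMFC placement differs per chip below: on G80 the 0x100-byte RAMFC
* appears to live at a fixed +0x1000 offset inside the channel's RAMIN,
* hence the "fake" gpuobj wrapped around an existing range; on G84-class
* chips it is a separately allocated 0x100-byte object.
*/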
int
nv50_fifo_create_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
nouveau_gpuobj_t *ramfc = NULL;
int ret;
DRM_DEBUG("ch%d\n", channel);
if (IS_G80) {
uint32_t ramfc_offset;
ramfc_offset = chan->ramin->gpuobj->im_pramin->start + 0x1000;
if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, 0x100,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
&ramfc, &chan->ramfc)))
return ret;
} else {
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, 0x100,
256,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
&chan->ramfc)))
return ret;
ramfc = chan->ramfc->gpuobj;
}
INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4);
INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? */
INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff);
INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff);
INSTANCE_WR(ramfc, 0x10/4, 0x00000000);
INSTANCE_WR(ramfc, 0x08/4, 0x00000000);
INSTANCE_WR(ramfc, 0x40/4, 0x00000000);
INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0);
INSTANCE_WR(ramfc, 0x54/4, 0x000f0000);
INSTANCE_WR(ramfc, 0x7c/4, 0x30000001);
INSTANCE_WR(ramfc, 0x78/4, 0x00000000);
INSTANCE_WR(ramfc, 0x4c/4, 0x00007fff);
if (!IS_G80) {
INSTANCE_WR(chan->ramin->gpuobj, 0, channel);
INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance);
INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */
INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12);
}
if ((ret = nv50_fifo_channel_enable(dev, channel))) {
DRM_ERROR("error enabling ch%d: %d\n", channel, ret);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
return ret;
}
return 0;
}
void
nv50_fifo_destroy_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
DRM_DEBUG("ch%d\n", channel);
nv50_fifo_channel_disable(dev, channel, 0);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
nv50_fifo_load_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
nouveau_gpuobj_t *ramfc = chan->ramfc->gpuobj;
DRM_DEBUG("ch%d\n", channel);
/*XXX: incomplete, only touches the regs that NV does */
NV_WRITE(0x3244, 0);
NV_WRITE(0x3240, 0);
NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4));
NV_WRITE(0x3234, INSTANCE_RD(ramfc, 0x4c/4));
NV_WRITE(0x3254, 1);
NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4));
if (!IS_G80) {
NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4));
NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4));
}
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, channel | (1<<16));
return 0;
}
int
nv50_fifo_save_context(drm_device_t *dev, int channel)
{
DRM_DEBUG("ch%d\n", channel);
DRM_ERROR("stub!\n");
return 0;
}

shared-core/nv50_graph.c (new file, 301 lines)

@ -0,0 +1,301 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
static void
nv50_graph_init_reset(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t pmc_e;
DRM_DEBUG("\n");
pmc_e = NV_READ(NV03_PMC_ENABLE);
NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PGRAPH);
pmc_e = NV_READ(NV03_PMC_ENABLE);
NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PGRAPH);
}
static void
nv50_graph_init_regs__nv(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
NV_WRITE(0x400804, 0xc0000000);
NV_WRITE(0x406800, 0xc0000000);
NV_WRITE(0x400c04, 0xc0000000);
NV_WRITE(0x401804, 0xc0000000);
NV_WRITE(0x405018, 0xc0000000);
NV_WRITE(0x402000, 0xc0000000);
NV_WRITE(0x400108, 0xffffffff);
NV_WRITE(0x400100, 0xffffffff);
NV_WRITE(0x400824, 0x00004000);
NV_WRITE(0x400500, 0x00010001);
}
static void
nv50_graph_init_regs(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */);
}
static uint32_t nv84_ctx_voodoo[] = {
0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 0x00413e06,
0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801,
0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
0x00600008, 0x0050004c, 0x00600009, 0x00413e45, 0x0041594d, 0x0070009d,
0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007,
0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007,
0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff,
0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c,
0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4,
0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed,
0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0,
0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300,
0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1,
0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b,
0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c,
0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500,
0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702,
0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb,
0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0,
0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00,
0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3,
0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b,
0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c,
0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00,
0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02,
0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389,
0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0,
0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5,
0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b,
0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c,
0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500,
0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702,
0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f,
0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb,
0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080,
0x00200480, 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb,
0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000,
0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d,
0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000,
0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916,
0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160,
0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb,
0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003,
0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006,
0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0
};
static void
nv50_graph_init_ctxctl(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t *voodoo = NULL;
DRM_DEBUG("\n");
switch (dev_priv->chipset) {
case 0x84:
voodoo = nv84_ctx_voodoo;
break;
default:
DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset);
break;
}
if (voodoo) {
NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
while (*voodoo != ~0) {
NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo);
voodoo++;
}
}
NV_WRITE(0x400320, 4);
NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
}
int
nv50_graph_init(drm_device_t *dev)
{
DRM_DEBUG("\n");
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
nv50_graph_init_regs(dev);
nv50_graph_init_ctxctl(dev);
return 0;
}
void
nv50_graph_takedown(drm_device_t *dev)
{
DRM_DEBUG("\n");
}
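/* The six words written at "hdr" below (0x200 on G80, 0x20 otherwise)
* look like an in-RAMIN descriptor covering the grctx: 0x00190002, then
* limit (instance + size - 1) and base (instance). The meaning of the
* remaining words, and of 0x00190002 itself, is unconfirmed.
*/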
int
nv50_graph_create_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
nouveau_gpuobj_t *ramin = chan->ramin->gpuobj;
int grctx_size = 0x60000, hdr;
int ret;
DRM_DEBUG("ch%d\n", channel);
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0,
grctx_size, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
&chan->ramin_grctx)))
return ret;
hdr = IS_G80 ? 0x200 : 0x20;
INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002);
INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
grctx_size - 1);
INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0);
INSTANCE_WR(ramin, (hdr + 0x10)/4, 0);
INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000);
return 0;
}
void
nv50_graph_destroy_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int i, hdr;
DRM_DEBUG("ch%d\n", channel);
hdr = IS_G80 ? 0x200 : 0x20;
for (i=hdr; i<hdr+24; i+=4)
INSTANCE_WR(chan->ramin->gpuobj, i/4, 0);
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
}
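/* Same PGRAPH transfer handshake as on NV40, with two NV50 quirks
* visible below: bit 31 is set on the context pointer, and the save/load
* direction is ORed into 0x400824 instead of written to CTXCTL_0310.
*/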
static int
nv50_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t old_cp, tv = 20000;
int i;
DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save);
old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst | (1<<31));
NV_WRITE(0x400824, NV_READ(0x400824) |
(save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
NV40_PGRAPH_CTXCTL_0310_XFER_LOAD));
NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);
for (i = 0; i < tv; i++) {
if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
break;
}
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
if (i == tv) {
DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save);
DRM_ERROR("0x40030C = 0x%08x\n",
NV_READ(NV40_PGRAPH_CTXCTL_030C));
return DRM_ERR(EBUSY);
}
return 0;
}
int
nv50_graph_load_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31));
int ret;
DRM_DEBUG("ch%d\n", channel);
#if 0
if ((ret = nv50_graph_transfer_context(dev, inst, 0)))
return ret;
#endif
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(0x400320, 4);
NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst);
return 0;
}
int
nv50_graph_save_context(drm_device_t *dev, int channel)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31));
DRM_DEBUG("ch%d\n", channel);
return nv50_graph_transfer_context(dev, inst, 1);
}

shared-core/nv50_instmem.c (new file, 262 lines)

@ -0,0 +1,262 @@
/*
* Copyright (C) 2007 Ben Skeggs.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
typedef struct {
uint32_t save1700[5]; /* 0x1700->0x1710 */
} nv50_instmem_priv;
#define NV50_INSTMEM_PAGE_SHIFT 12
#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
#define NV50_INSTMEM_RSVD_SIZE (64 * 1024)
#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
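/* One 8-byte PTE per 4KiB PRAMIN page, so PT_SIZE(aperture) is
* (aperture >> 12) << 3. For example, a 16MiB aperture needs
* (16MiB >> 12) = 4096 PTEs = 32KiB of page table.
*/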
int
nv50_instmem_init(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nv50_instmem_priv *priv;
uint32_t rv, pt, pts, cb, cb0, cb1, unk, as;
uint32_t i, v;
int ret;
priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
if (!priv)
return DRM_ERR(ENOMEM);
dev_priv->Engine.instmem.priv = priv;
/* Save current state */
for (i = 0x1700; i <= 0x1710; i+=4)
priv->save1700[(i-0x1700)/4] = NV_READ(i);
as = dev_priv->ramin->size;
rv = nouveau_mem_fb_amount(dev) - (1*1024*1024);
pt = rv + 0xd0000;
pts = NV50_INSTMEM_PT_SIZE(as);
cb = rv + 0xc8000;
if ((dev_priv->chipset & 0xf0) != 0x50) {
unk = cb + 0x4200;
cb0 = cb + 0x4240;
cb1 = cb + 0x278;
} else {
unk = cb + 0x5400;
cb0 = cb + 0x5440;
cb1 = cb + 0x1438;
}
DRM_DEBUG("PRAMIN config:\n");
DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", rv);
DRM_DEBUG(" Aperture size: %i MiB\n", as >> 20);
DRM_DEBUG(" PT base: 0x%08x\n", pt);
DRM_DEBUG(" PT size: %d KiB\n", pts >> 10);
DRM_DEBUG(" BIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8);
DRM_DEBUG(" Config base: 0x%08x\n", cb);
DRM_DEBUG(" ctxdma Config0: 0x%08x\n", cb0);
DRM_DEBUG(" Config1: 0x%08x\n", cb1);
/* Map first MiB of reserved vram into BAR0 PRAMIN aperture */
NV_WRITE(0x1700, (rv>>16));
/* Poke some regs.. */
NV_WRITE(0x1704, (cb>>12));
NV_WRITE(0x1710, (((unk-cb)>>4))|(1<<31));
NV_WRITE(0x1704, (cb>>12)|(1<<30));
/* CB0, some DMA object, NFI what it points at... Needed however,
* or the PRAMIN aperture doesn't operate as expected.
*/
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x00, 0x7fc00000);
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x04, 0xe1ffffff);
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x08, 0xe0000000);
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x0c, 0x01000001);
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x10, 0x00000000);
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x14, 0x00000000);
/* CB1, points at PRAMIN PT */
NV_WRITE(NV_RAMIN + (cb1 - rv) + 0, pt | 0x63);
NV_WRITE(NV_RAMIN + (cb1 - rv) + 4, 0x00000000);
/* Zero PRAMIN page table */
v = NV_RAMIN + (pt - rv);
for (i = v; i < v + pts; i += 8) {
NV_WRITE(i + 0x00, 0x00000009);
NV_WRITE(i + 0x04, 0x00000000);
}
/* Map page table into PRAMIN aperture */
for (i = pt; i < pt + pts; i += 0x1000) {
uint32_t pte = NV_RAMIN + (pt-rv) + (((i-pt) >> 12) << 3);
DRM_DEBUG("PRAMIN PTE = 0x%08x @ 0x%08x\n", i, pte);
NV_WRITE(pte + 0x00, i | 1);
NV_WRITE(pte + 0x04, 0x00000000);
}
/* Points at CB0 */
NV_WRITE(0x170c, (((cb0 - cb)>>4)|(1<<31)));
/* Confirm it all worked, should be able to read back the page table's
* PTEs from the PRAMIN BAR
*/
NV_WRITE(0x1700, pt >> 16);
if (NV_READ(0x700000) != NV_RI32(0)) {
DRM_ERROR("Failed to init PRAMIN page table\n");
return DRM_ERR(EINVAL);
}
/* Create a heap to manage PRAMIN aperture allocations */
ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, pts, as-pts);
if (ret) {
DRM_ERROR("Failed to init PRAMIN heap\n");
return DRM_ERR(ENOMEM);
}
DRM_DEBUG("NV50: PRAMIN setup ok\n");
/* Don't alloc the last MiB of VRAM, probably too much, but be safe
* at least for now.
*/
dev_priv->ramin_rsvd_vram = 1*1024*1024;
/*XXX: probably incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
return 0;
}
void
nv50_instmem_takedown(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
int i;
if (!priv)
return;
/* Restore state from before init */
for (i = 0x1700; i <= 0x1710; i+=4)
NV_WRITE(i, priv->save1700[(i-0x1700)/4]);
dev_priv->Engine.instmem.priv = NULL;
drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
}
int
nv50_instmem_populate(drm_device_t *dev, nouveau_gpuobj_t *gpuobj, uint32_t *sz)
{
if (gpuobj->im_backing)
return DRM_ERR(EINVAL);
*sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
if (*sz == 0)
return DRM_ERR(EINVAL);
gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE,
*sz, NOUVEAU_MEM_FB,
(DRMFILE)-2);
if (!gpuobj->im_backing) {
DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n");
return DRM_ERR(ENOMEM);
}
return 0;
}
void
nv50_instmem_clear(drm_device_t *dev, nouveau_gpuobj_t *gpuobj)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
if (gpuobj && gpuobj->im_backing) {
if (gpuobj->im_bound)
dev_priv->Engine.instmem.unbind(dev, gpuobj);
nouveau_mem_free(dev, gpuobj->im_backing);
gpuobj->im_backing = NULL;
}
}
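/* PTE layout assumed by bind/unbind below: each entry is two 32-bit
* words, the first holding the page's backing VRAM address ORed with a
* present bit (| 1), the second always zero. The entry index comes from
* the object's PRAMIN offset: (start >> 12) << 3.
*/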
int
nv50_instmem_bind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t pte, pte_end, vram;
if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
return DRM_ERR(EINVAL);
DRM_DEBUG("st=0x%0llx sz=0x%0llx\n",
gpuobj->im_pramin->start, gpuobj->im_pramin->size);
pte = (gpuobj->im_pramin->start >> 12) << 3;
pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
vram = gpuobj->im_backing->start - dev_priv->fb_phys;
if (pte == pte_end) {
DRM_ERROR("WARNING: badness in bind() pte calc\n");
pte_end++;
}
DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
DRM_DEBUG("first vram page: 0x%llx\n",
gpuobj->im_backing->start);
while (pte < pte_end) {
NV_WI32(pte + 0, vram | 1);
NV_WI32(pte + 4, 0x00000000);
pte += 8;
vram += NV50_INSTMEM_PAGE_SIZE;
}
gpuobj->im_bound = 1;
return 0;
}
int
nv50_instmem_unbind(drm_device_t *dev, nouveau_gpuobj_t *gpuobj)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
uint32_t pte, pte_end;
if (gpuobj->im_bound == 0)
return DRM_ERR(EINVAL);
pte = (gpuobj->im_pramin->start >> 12) << 3;
pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
while (pte < pte_end) {
NV_WI32(pte + 0, 0x00000000);
NV_WI32(pte + 4, 0x00000000);
pte += 8;
}
gpuobj->im_bound = 0;
return 0;
}

shared-core/nv50_mc.c (new file, 42 lines)

@ -0,0 +1,42 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
int
nv50_mc_init(drm_device_t *dev)
{
drm_nouveau_private_t *dev_priv = dev->dev_private;
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
return 0;
}
void nv50_mc_takedown(drm_device_t *dev)
{
}

shared-core/r300_cmdbuf.c

@ -148,15 +148,16 @@ void r300_init_reg_flags(void)
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
ADD_RANGE(0x2080, 1);
ADD_RANGE(R300_VAP_CNTL, 1);
ADD_RANGE(R300_SE_VTE_CNTL, 2);
ADD_RANGE(0x2134, 2);
ADD_RANGE(0x2140, 1);
ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
ADD_RANGE(0x21DC, 1);
ADD_RANGE(0x221C, 1);
ADD_RANGE(0x2220, 4);
ADD_RANGE(0x2288, 1);
ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
ADD_RANGE(R300_VAP_CLIP_X_0, 4);
ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
@ -168,13 +169,13 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_RE_POINTSIZE, 1);
ADD_RANGE(0x4230, 3);
ADD_RANGE(R300_RE_LINE_CNT, 1);
ADD_RANGE(0x4238, 1);
ADD_RANGE(R300_RE_UNK4238, 1);
ADD_RANGE(0x4260, 3);
ADD_RANGE(0x4274, 4);
ADD_RANGE(0x4288, 5);
ADD_RANGE(0x42A0, 1);
ADD_RANGE(R300_RE_SHADE, 4);
ADD_RANGE(R300_RE_POLYGON_MODE, 5);
ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
ADD_RANGE(0x42B4, 1);
ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
ADD_RANGE(R300_RE_CULL_CNTL, 1);
ADD_RANGE(0x42C0, 2);
ADD_RANGE(R300_RS_CNTL_0, 2);
@ -190,22 +191,22 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_PFS_INSTR1_0, 64);
ADD_RANGE(R300_PFS_INSTR2_0, 64);
ADD_RANGE(R300_PFS_INSTR3_0, 64);
ADD_RANGE(0x4BC0, 1);
ADD_RANGE(0x4BC8, 3);
ADD_RANGE(R300_RE_FOG_STATE, 1);
ADD_RANGE(R300_FOG_COLOR_R, 3);
ADD_RANGE(R300_PP_ALPHA_TEST, 2);
ADD_RANGE(0x4BD8, 1);
ADD_RANGE(R300_PFS_PARAM_0_X, 64);
ADD_RANGE(0x4E00, 1);
ADD_RANGE(R300_RB3D_CBLEND, 2);
ADD_RANGE(R300_RB3D_COLORMASK, 1);
ADD_RANGE(0x4E10, 3);
ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
ADD_RANGE(0x4F10, 4);
ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
ADD_RANGE(0x4F28, 1);

shared-core/r300_reg.h

@ -23,6 +23,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
/* *INDENT-OFF* */
#ifndef _R300_REG_H
#define _R300_REG_H
@ -114,6 +116,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
/* each of the following is 3 bits wide, specifies number
of components */
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
@ -145,6 +149,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_VC_NO_SWAP (0 << 0)
# define R300_VC_16BIT_SWAP (1 << 0)
# define R300_VC_32BIT_SWAP (2 << 0)
# define R300_VAP_TCL_BYPASS (1 << 8)
/* gap */
@ -296,6 +301,18 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_221C_NORMAL 0x00000000
# define R300_221C_CLEAR 0x0001C000
/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
* plane is per-pixel and the second plane is per-vertex.
*
* This was determined by experimentation alone but I believe it is correct.
*
* These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
*/
#define R300_VAP_CLIP_X_0 0x2220
#define R300_VAP_CLIP_X_1 0x2224
#define R300_VAP_CLIP_Y_0 0x2228
#define R300_VAP_CLIP_Y_1 0x2230
/* gap */
/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
@ -319,13 +336,15 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* The meaning of the two UNKNOWN fields is obviously not known. However,
* experiments so far have shown that both *must* point to an instruction
* inside the vertex program, otherwise the GPU locks up.
*
* fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
* CNTL_1_UNKNOWN points to instruction where last write to position takes
* place.
* R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to
* position takes place.
*
* Most likely this is used to ignore the rest of the program in cases
* where a group of verts isn't visible. For some reason this "section"
* sometimes accepts other instructions that have no relationship with
*position calculations.
* position calculations.
*/
#define R300_VAP_PVS_CNTL_1 0x22D0
# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
@ -487,6 +506,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_GB_W_SELECT_1 (1<<4)
#define R300_GB_AA_CONFIG 0x4020
# define R300_AA_DISABLE 0x00
# define R300_AA_ENABLE 0x01
# define R300_AA_SUBSAMPLES_2 0
# define R300_AA_SUBSAMPLES_3 (1<<1)
@ -669,6 +689,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Special handling for color: When the fragment program uses color,
* the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
* color register index.
*
* Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
* R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
* See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
* correct or not. - Oliver.
*/
# define R300_RS_ROUTE_0_COLOR (1 << 14)
# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
@ -958,7 +983,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
* first node is stored in NODE_2, the second node is stored in NODE_3.
*
* Offsets are relative to the master offset from PFS_CNTL_2.
* LAST_NODE is set for the last node, and only for the last node.
*/
#define R300_PFS_NODE_0 0x4610
#define R300_PFS_NODE_1 0x4614
@ -972,7 +996,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
# define R300_PFS_NODE_TEX_END_SHIFT 17
# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
/*# define R300_PFS_NODE_LAST_NODE (1 << 22) */
# define R300_PFS_NODE_OUTPUT_COLOR (1 << 22)
# define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23)
@ -1553,6 +1576,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6)
#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6)
#define R300_PRIM_NUM_VERTICES_SHIFT 16
#define R300_PRIM_NUM_VERTICES_MASK 0xffff
/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
* Two parameter dwords:
@ -1581,6 +1605,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_EB_UNK1_SHIFT 24
# define R300_EB_UNK1 (0x80<<24)
# define R300_EB_UNK2 0x0810
#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400
#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
/* END: Packet 3 commands */
@ -1601,5 +1626,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00
#endif /* _R300_REG_H */
/* *INDENT-ON* */

shared-core/radeon_cp.c

@ -1190,9 +1190,15 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(RADEON_CP_RB_CNTL,
dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
RADEON_BUF_SWAP_32BIT |
(dev_priv->ring.fetch_size_l2ow << 18) |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
RADEON_WRITE(RADEON_CP_RB_CNTL,
(dev_priv->ring.fetch_size_l2ow << 18) |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
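/* Worked example of the CP_RB_CNTL packing above, using the defaults
* assigned further down in radeon_do_init_cp(): with a 1MiB ring,
* size_l2qw = drm_order(1048576 / 8) = 17,
* rptr_update_l2qw = drm_order(4096 / 8) = 9,
* fetch_size_l2ow = drm_order(32 / 16) = 1,
* so the register gets (1 << 18) | (9 << 8) | 17, plus
* RADEON_BUF_SWAP_32BIT on big-endian. drm_order() is log2, rounded up.
*/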
/* Start with assuming that writeback doesn't work */
@ -1391,8 +1397,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
DRM_DEBUG("\n");
/* if we require new memory map but we don't have it fail */
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap)
{
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
@ -1424,6 +1429,10 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
return DRM_ERR(EINVAL);
}
/* Enable vblank on CRTC1 for older X servers */
dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
switch(init->func) {
case RADEON_INIT_R200_CP:
dev_priv->microcode_version = UCODE_R200;
@ -1663,6 +1672,12 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
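Assuming drm_order() returns the ceiling of the base-2 logarithm (its behavior in the DRM core), the derived values work out as follows for a hypothetical 1 MiB ring:
/* size_l2qw        = drm_order(0x100000 / 8) = 17
 * rptr_update_l2qw = drm_order(4096 / 8)     = 9
 * fetch_size_l2ow  = drm_order(32 / 16)      = 1
 * tail_mask        = (0x100000 / 4) - 1      = 0x3ffff */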

View File

@ -655,6 +655,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
typedef struct drm_radeon_getparam {
int param;
@ -709,7 +710,7 @@ typedef struct drm_radeon_setparam {
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
/* 1.14: Clients can allocate/free a surface
*/
typedef struct drm_radeon_surface_alloc {
@ -722,5 +723,7 @@ typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
#endif
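For context, a hedged userspace sketch of driving the new parameter (this assumes libdrm's drmCommandWrite() and the DRM_RADEON_SETPARAM command index; it is an illustration, not code from this commit):
#include <string.h>
#include <xf86drm.h>
#include "radeon_drm.h"

static int select_vblank_crtc2(int fd)
{
	drm_radeon_setparam_t sp;

	memset(&sp, 0, sizeof(sp));
	sp.param = RADEON_SETPARAM_VBLANK_CRTC;
	sp.value = DRM_RADEON_VBLANK_CRTC2;
	/* The kernel rejects unknown CRTC bits with EINVAL; see
	 * radeon_vblank_crtc_set() later in this diff. */
	return drmCommandWrite(fd, DRM_RADEON_SETPARAM, &sp, sizeof(sp));
}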

View File

@ -97,10 +97,11 @@
* new packet type)
* 1.26- Add support for variable size PCI(E) gart aperture
* 1.27- Add support for IGP GART
* 1.28- Add support for VBL on CRTC2
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 27
#define DRIVER_MINOR 28
#define DRIVER_PATCHLEVEL 0
/*
@ -163,8 +164,14 @@ typedef struct drm_radeon_freelist {
typedef struct drm_radeon_ring_buffer {
u32 *start;
u32 *end;
int size;
int size_l2qw;
int size; /* Double Words */
int size_l2qw; /* log2 Quad Words */
int rptr_update; /* Double Words */
int rptr_update_l2qw; /* log2 Quad Words */
int fetch_size; /* Double Words */
int fetch_size_l2ow; /* log2 Oct Words */
u32 tail;
u32 tail_mask;
@ -279,6 +286,9 @@ typedef struct drm_radeon_private {
/* SW interrupt */
wait_queue_head_t swi_queue;
atomic_t swi_emitted;
int vblank_crtc;
uint32_t irq_enable_reg;
int irq_enabled;
struct radeon_surface surfaces[RADEON_MAX_SURFACES];
struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES];
@ -355,10 +365,14 @@ extern int radeon_irq_wait(DRM_IOCTL_ARGS);
extern void radeon_do_release(drm_device_t * dev);
extern int radeon_driver_vblank_wait(drm_device_t * dev,
unsigned int *sequence);
extern int radeon_driver_vblank_wait2(drm_device_t * dev,
unsigned int *sequence);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
extern void radeon_driver_irq_preinstall(drm_device_t * dev);
extern void radeon_driver_irq_postinstall(drm_device_t * dev);
extern void radeon_driver_irq_uninstall(drm_device_t * dev);
extern int radeon_vblank_crtc_get(drm_device_t *dev);
extern int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value);
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
extern int radeon_driver_unload(struct drm_device *dev);
@ -495,12 +509,15 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
@ -601,9 +618,51 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
# define RADEON_SOFT_RESET_E2 (1 << 5)
# define RADEON_SOFT_RESET_RB (1 << 6)
# define RADEON_SOFT_RESET_HDP (1 << 7)
/*
* 6:0 Available slots in the FIFO
* 8 Host Interface active
* 9 CP request active
* 10 FIFO request active
* 11 Host Interface retry active
* 12 CP retry active
* 13 FIFO retry active
* 14 FIFO pipeline busy
* 15 Event engine busy
* 16 CP command stream busy
* 17 2D engine busy
* 18 2D portion of render backend busy
* 20 3D setup engine busy
* 26 GA engine busy
* 27 CBA 2D engine busy
* 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or
* command stream queue not empty or Ring Buffer not empty
*/
#define RADEON_RBBM_STATUS 0x0e40
/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */
/* #define RADEON_RBBM_STATUS 0x1740 */
/* bits 6:0 are dword slots available in the cmd fifo */
# define RADEON_RBBM_FIFOCNT_MASK 0x007f
# define RADEON_RBBM_ACTIVE (1 << 31)
# define RADEON_HIRQ_ON_RBB (1 << 8)
# define RADEON_CPRQ_ON_RBB (1 << 9)
# define RADEON_CFRQ_ON_RBB (1 << 10)
# define RADEON_HIRQ_IN_RTBUF (1 << 11)
# define RADEON_CPRQ_IN_RTBUF (1 << 12)
# define RADEON_CFRQ_IN_RTBUF (1 << 13)
# define RADEON_PIPE_BUSY (1 << 14)
# define RADEON_ENG_EV_BUSY (1 << 15)
# define RADEON_CP_CMDSTRM_BUSY (1 << 16)
# define RADEON_E2_BUSY (1 << 17)
# define RADEON_RB2D_BUSY (1 << 18)
# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */
# define RADEON_VAP_BUSY (1 << 20)
# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */
# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */
# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */
# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */
# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */
# define RADEON_GA_BUSY (1 << 26)
# define RADEON_CBA2D_BUSY (1 << 27)
# define RADEON_RBBM_ACTIVE (1 << 31)
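A minimal sketch of how these status bits are typically consumed, modeled on the driver's existing FIFO/idle waits (the usec_timeout convention and macros are taken from the surrounding driver, but this exact helper is illustrative, not part of the commit):
/* Spin until the RBBM reports the chip fully idle; returns 0 on
 * success, EBUSY on timeout. RADEON_READ() and DRM_UDELAY() are the
 * driver's register-read and delay macros. */
static int radeon_wait_rbbm_idle(drm_radeon_private_t *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE))
			return 0;
		DRM_UDELAY(1);
	}
	return DRM_ERR(EBUSY);
}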
#define RADEON_RE_LINE_PATTERN 0x1cd0
#define RADEON_RE_MISC 0x26c4
#define RADEON_RE_TOP_LEFT 0x26c0

View File

@ -73,18 +73,35 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
* outside the DRM
*/
stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
RADEON_CRTC_VBLANK_STAT));
RADEON_CRTC_VBLANK_STAT |
RADEON_CRTC2_VBLANK_STAT));
if (!stat)
return IRQ_NONE;
stat &= dev_priv->irq_enable_reg;
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST) {
DRM_WAKEUP(&dev_priv->swi_queue);
}
/* VBLANK interrupt */
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
int vblank_crtc = dev_priv->vblank_crtc;
if ((vblank_crtc &
(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
if (stat & RADEON_CRTC_VBLANK_STAT)
atomic_inc(&dev->vbl_received);
if (stat & RADEON_CRTC2_VBLANK_STAT)
atomic_inc(&dev->vbl_received2);
} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
(vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
((stat & RADEON_CRTC2_VBLANK_STAT) &&
(vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
}
@ -127,19 +144,30 @@ static int radeon_wait_irq(drm_device_t * dev, int swi_nr)
return ret;
}
int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence,
int crtc)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
int ack = 0;
atomic_t *counter;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
radeon_acknowledge_irqs(dev_priv, RADEON_CRTC_VBLANK_STAT);
if (crtc == DRM_RADEON_VBLANK_CRTC1) {
counter = &dev->vbl_received;
ack |= RADEON_CRTC_VBLANK_STAT;
} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
counter = &dev->vbl_received2;
ack |= RADEON_CRTC2_VBLANK_STAT;
} else
return DRM_ERR(EINVAL);
radeon_acknowledge_irqs(dev_priv, ack);
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
@ -148,7 +176,7 @@ int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
@ -156,6 +184,16 @@ int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
{
return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
}
int radeon_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
{
return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
}
/* Needs the lock as it touches the ring.
*/
int radeon_irq_emit(DRM_IOCTL_ARGS)
@ -204,6 +242,21 @@ int radeon_irq_wait(DRM_IOCTL_ARGS)
return radeon_wait_irq(dev, irqwait.irq_seq);
}
static void radeon_enable_interrupt(drm_device_t *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
dev_priv->irq_enabled = 1;
}
/* drm_dma.h hooks
*/
void radeon_driver_irq_preinstall(drm_device_t * dev)
@ -216,7 +269,8 @@ void radeon_driver_irq_preinstall(drm_device_t * dev)
/* Clear bits if they're already high */
radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
RADEON_CRTC_VBLANK_STAT));
RADEON_CRTC_VBLANK_STAT |
RADEON_CRTC2_VBLANK_STAT));
}
void radeon_driver_irq_postinstall(drm_device_t * dev)
@ -227,9 +281,7 @@ void radeon_driver_irq_postinstall(drm_device_t * dev)
atomic_set(&dev_priv->swi_emitted, 0);
DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
/* Turn on SW and VBL ints */
RADEON_WRITE(RADEON_GEN_INT_CNTL,
RADEON_CRTC_VBLANK_MASK | RADEON_SW_INT_ENABLE);
radeon_enable_interrupt(dev);
}
void radeon_driver_irq_uninstall(drm_device_t * dev)
@ -239,6 +291,38 @@ void radeon_driver_irq_uninstall(drm_device_t * dev)
if (!dev_priv)
return;
dev_priv->irq_enabled = 0;
/* Disable *all* interrupts */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}
int radeon_vblank_crtc_get(drm_device_t *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
u32 flag;
u32 value;
flag = RADEON_READ(RADEON_GEN_INT_CNTL);
value = 0;
if (flag & RADEON_CRTC_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC1;
if (flag & RADEON_CRTC2_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC2;
return value;
}
int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
return DRM_ERR(EINVAL);
}
dev_priv->vblank_crtc = (unsigned int)value;
radeon_enable_interrupt(dev);
return 0;
}

View File

@ -3131,6 +3131,9 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
else
value = RADEON_CARD_PCI;
break;
case RADEON_PARAM_VBLANK_CRTC:
value = radeon_vblank_crtc_get(dev);
break;
default:
DRM_DEBUG( "Invalid parameter %d\n", param.param );
return DRM_ERR(EINVAL);
@ -3192,6 +3195,9 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
break;
case RADEON_SETPARAM_VBLANK_CRTC:
return radeon_vblank_crtc_set(dev, sp.value);
break;
default:
DRM_DEBUG("Invalid parameter %d\n", sp.param);
return DRM_ERR(EINVAL);

View File

@ -33,11 +33,11 @@
#define DRIVER_AUTHOR "SIS, Tungsten Graphics"
#define DRIVER_NAME "sis"
#define DRIVER_DESC "SIS 300/630/540"
#define DRIVER_DATE "20060619"
#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8"
#define DRIVER_DATE "20070626"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 1
#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 0
enum sis_family {
SIS_OTHER = 0,

View File

@ -84,9 +84,9 @@ static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
return ((hw_addr <= dev_priv->dma_low) ?
(dev_priv->dma_low - hw_addr) :
return ((hw_addr <= dev_priv->dma_low) ?
(dev_priv->dma_low - hw_addr) :
(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}
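To make the wrap arithmetic concrete (hypothetical numbers):
/* With dma_wrap = 0x100000: if hw_addr = 0x08000 and dma_low = 0x0a000,
 * the regulator trails by 0x0a000 - 0x08000 = 0x2000 bytes. If the CPU
 * side has wrapped, e.g. hw_addr = 0xff000 and dma_low = 0x02000, the
 * lag is 0x100000 + 0x02000 - 0xff000 = 0x3000 bytes. */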
@ -103,7 +103,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
uint32_t count;
hw_addr_ptr = dev_priv->hw_addr_ptr;
cur_addr = dev_priv->dma_low;
next_addr = cur_addr + size + 512*1024;
next_addr = cur_addr + size + 512 * 1024;
count = 1000000;
do {
hw_addr = *hw_addr_ptr - agp_base;
@ -207,8 +207,8 @@ static int via_initialize(drm_device_t * dev,
dev_priv->dma_offset = init->offset;
dev_priv->last_pause_ptr = NULL;
dev_priv->hw_addr_ptr =
(volatile uint32_t *)((char *)dev_priv->mmio->handle +
init->reg_pause_addr);
(volatile uint32_t *)((char *)dev_priv->mmio->handle +
init->reg_pause_addr);
via_cmdbuf_start(dev_priv);
@ -239,8 +239,8 @@ static int via_dma_init(DRM_IOCTL_ARGS)
retcode = via_dma_cleanup(dev);
break;
case VIA_DMA_INITIALIZED:
retcode = (dev_priv->ring.virtual_start != NULL) ?
0: DRM_ERR( EFAULT );
retcode = (dev_priv->ring.virtual_start != NULL) ?
0 : DRM_ERR(EFAULT);
break;
default:
retcode = DRM_ERR(EINVAL);
@ -268,8 +268,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
if (cmd->size > VIA_PCI_BUF_SIZE) {
return DRM_ERR(ENOMEM);
}
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
@ -292,7 +291,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
}
memcpy(vb, dev_priv->pci_buf, cmd->size);
dev_priv->dma_low += cmd->size;
/*
@ -301,7 +300,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
*/
if (cmd->size < 0x100)
via_pad_cache(dev_priv,(0x100 - cmd->size) >> 3);
via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
via_cmdbuf_pause(dev_priv);
return 0;
@ -321,7 +320,7 @@ static int via_flush_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
return via_driver_dma_quiescent(dev);
}
@ -332,7 +331,7 @@ static int via_cmdbuffer(DRM_IOCTL_ARGS)
drm_via_cmdbuffer_t cmdbuf;
int ret;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
sizeof(cmdbuf));
@ -355,16 +354,16 @@ static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
if (cmd->size > VIA_PCI_BUF_SIZE) {
return DRM_ERR(ENOMEM);
}
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
if ((ret =
via_verify_command_stream((uint32_t *)dev_priv->pci_buf,
if ((ret =
via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
cmd->size, dev, 0))) {
return ret;
}
ret =
via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
cmd->size);
@ -377,7 +376,7 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
drm_via_cmdbuffer_t cmdbuf;
int ret;
LOCK_TEST_WITH_RETURN( dev, filp );
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
sizeof(cmdbuf));
@ -393,7 +392,6 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
return 0;
}
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
uint32_t * vb, int qw_count)
{
@ -403,7 +401,6 @@ static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
return vb;
}
/*
* This function is used internally by the ring buffer management code.
*
@ -419,8 +416,7 @@ static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
* modifying the pause address stored in the buffer itself. If
* the regulator has already paused, restart it.
*/
static int via_hook_segment(drm_via_private_t *dev_priv,
static int via_hook_segment(drm_via_private_t * dev_priv,
uint32_t pause_addr_hi, uint32_t pause_addr_lo,
int no_pci_fire)
{
@ -479,7 +475,7 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
}
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
uint32_t addr, uint32_t *cmd_addr_hi,
uint32_t addr, uint32_t *cmd_addr_hi,
uint32_t *cmd_addr_lo, int skip_wait)
{
uint32_t agp_base;
@ -508,9 +504,6 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
return vb;
}
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
uint32_t pause_addr_lo, pause_addr_hi;

View File

@ -255,7 +255,6 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
drm_device_t * dev)
{
#ifdef __linux__
struct list_head *list;
drm_map_list_t *r_list;
#endif
drm_local_map_t *map = seq->map_cache;
@ -265,8 +264,7 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
return map;
}
#ifdef __linux__
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;