Merged sarea-1-0-0

main
Kevin E Martin 2001-03-14 22:22:50 +00:00
parent e2b2bffc6b
commit 74e19a4018
31 changed files with 1079 additions and 390 deletions

View File

@ -513,6 +513,16 @@ int drmAddMap(int fd,
return 0;
}
int drmRmMap(int fd, drmHandle handle)
{
drm_map_t map;
map.handle = (void *)handle;
if(ioctl(fd, DRM_IOCTL_RM_MAP, &map)) return -errno;
return 0;
}
int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
int agp_offset)
{
@ -1088,6 +1098,29 @@ void *drmGetContextTag(int fd, drmContext context)
return value;
}
int drmAddContextPrivateMapping(int fd, drmContext ctx_id, drmHandle handle)
{
drm_ctx_priv_map_t map;
map.ctx_id = ctx_id;
map.handle = (void *)handle;
if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map)) return -errno;
return 0;
}
int drmGetContextPrivateMapping(int fd, drmContext ctx_id, drmHandlePtr handle)
{
drm_ctx_priv_map_t map;
map.ctx_id = ctx_id;
if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map)) return -errno;
if (handle) *handle = (drmHandle)map.handle;
return 0;
}
int drmGetMap(int fd, int idx, drmHandle *offset, drmSize *size,
drmMapType *type, drmMapFlags *flags, drmHandle *handle,
int *mtrr)
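
The two functions added above (drmAddContextPrivateMapping and drmGetContextPrivateMapping) are the new libdrm entry points for binding a shared-memory map to a single context and reading that binding back; drmRmMap, also added in this file, tears a removable map down. A minimal user-space sketch of how they fit together; the wrapper function, its error handling, and the assumption that 'handle' came from an earlier drmAddMap of an SHM region are illustrative only:

/* Hypothetical sketch: bind an existing map to a context and verify the
 * binding.  'fd' is an open DRM device, 'ctx' an existing drmContext, and
 * 'handle' the drmHandle of a previously created shared-memory map. */
#include "xf86drm.h"    /* assumed libdrm header of the period */

static int bind_private_sarea(int fd, drmContext ctx, drmHandle handle)
{
    drmHandle check = 0;

    if (drmAddContextPrivateMapping(fd, ctx, handle))   /* SET_SAREA_CTX */
        return -1;
    if (drmGetContextPrivateMapping(fd, ctx, &check))   /* GET_SAREA_CTX */
        return -1;
    return (check == handle) ? 0 : -1;
}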

View File

@ -73,6 +73,7 @@
#if LINUX_VERSION_CODE < 0x020400
#include "compat-pre24.h"
#endif
#include <asm/pgalloc.h>
#include "drm.h"
/* DRM template customization defaults
@ -329,16 +330,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
DRM(ioremapfree)( (map)->handle, (map)->size ); \
} while (0)
#define DRM_FIND_MAP(map, o) \
do { \
int i; \
for ( i = 0 ; i < dev->map_count ; i++ ) { \
if ( dev->maplist[i]->offset == o ) { \
map = dev->maplist[i]; \
#define DRM_FIND_MAP(_map, _o) \
do { \
struct list_head *_list; \
list_for_each(_list, &dev->maplist->head) { \
drm_map_list_t *_r_list; \
_r_list = (drm_map_list_t *)_list; \
if(_r_list->map && \
_r_list->map->offset == (_o)) { \
(_map) = _r_list->map; \
break; \
} \
} \
} while (0)
} while(0)
/* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
@ -349,6 +353,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
(_map) = (_dev)->context_sareas[_ctx]; \
} while(0)
typedef int drm_ioctl_t( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
@ -577,6 +585,11 @@ typedef struct drm_sigdata {
drm_hw_lock_t *lock;
} drm_sigdata_t;
typedef struct drm_map_list {
struct list_head head;
drm_map_t *map;
} drm_map_list_t;
typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
@ -609,9 +622,12 @@ typedef struct drm_device {
drm_magic_head_t magiclist[DRM_HASH_SIZE];
/* Memory management */
drm_map_t **maplist; /* Vector of pointers to regions */
drm_map_list_t *maplist; /* Linked list of regions */
int map_count; /* Number of mappable regions */
drm_map_t **context_sareas;
int max_context;
drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
drm_lock_data_t lock; /* Information on hardware lock */
@ -700,9 +716,6 @@ extern unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
extern unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long DRM(vm_shm_nopage_lock)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
@ -714,15 +727,13 @@ extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern struct page *DRM(vm_shm_nopage_lock)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
#endif
extern void DRM(vm_open)(struct vm_area_struct *vma);
extern void DRM(vm_close)(struct vm_area_struct *vma);
extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
extern int DRM(mmap_dma)(struct file *filp,
struct vm_area_struct *vma);
extern int DRM(mmap)(struct file *filp, struct vm_area_struct *vma);
@ -788,6 +799,11 @@ extern int DRM(ctxbitmap_init)( drm_device_t *dev );
extern void DRM(ctxbitmap_cleanup)( drm_device_t *dev );
#endif
extern int DRM(setsareactx)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int DRM(getsareactx)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
/* Drawable IOCTL support (drm_drawable.h) */
extern int DRM(adddraw)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@ -830,6 +846,8 @@ extern int DRM(notifier)(void *priv);
extern int DRM(order)( unsigned long size );
extern int DRM(addmap)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int DRM(rmmap)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
#if __HAVE_DMA
extern int DRM(addbufs)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
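
Driver-side code now resolves maps through the rewritten DRM_FIND_MAP macro, which walks the dev->maplist linked list, and fetches a context's private SAREA with the new DRM_GET_PRIV_SAREA macro. A hedged kernel-side sketch of typical usage follows; the function name and its parameters are made up for illustration, and only the two macros and the drm types come from this header:

/* Illustrative only: look up an MMIO map by offset and the private SAREA
 * installed for a context by DRM(setsareactx). */
static int example_lookup(drm_device_t *dev, unsigned long mmio_offset, int ctx)
{
        drm_map_t *mmio  = NULL;
        drm_map_t *sarea = NULL;

        DRM_FIND_MAP( mmio, mmio_offset );      /* walks dev->maplist->head */
        if ( !mmio ) return -EINVAL;

        if ( ctx < 0 || ctx >= dev->max_context ) return -EINVAL;
        DRM_GET_PRIV_SAREA( dev, ctx, sarea );  /* dev->context_sareas[ctx] */
        if ( !sarea ) return -EINVAL;

        return 0;
}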

View File

@ -69,6 +69,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map;
drm_map_list_t *list;
if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
@ -81,6 +82,14 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return -EFAULT;
}
/* Only allow shared memory to be removable since we only keep enough
* bookkeeping information about shared memory to allow for removal
* when processes fork.
*/
if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
return -EINVAL;
}
DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
map->offset, map->size, map->type );
if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
@ -100,7 +109,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return -EINVAL;
}
#endif
#ifdef CONFIG_MTRR
#ifdef __REALLY_HAVE_MTRR
if ( map->type == _DRM_FRAME_BUFFER ||
(map->flags & _DRM_WRITE_COMBINING) ) {
map->mtrr = mtrr_add( map->offset, map->size,
@ -111,9 +120,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
break;
case _DRM_SHM:
map->handle = (void *)DRM(alloc_pages)( DRM(order)( map->size )
- PAGE_SHIFT,
DRM_MEM_SAREA );
map->handle = vmalloc_32(map->size);
DRM_DEBUG( "%ld %d %p\n",
map->size, DRM(order)( map->size ), map->handle );
if ( !map->handle ) {
@ -136,22 +143,17 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return -EINVAL;
}
down( &dev->struct_sem );
if ( dev->maplist ) {
++dev->map_count;
dev->maplist = DRM(realloc)( dev->maplist,
(dev->map_count-1)
* sizeof(*dev->maplist),
dev->map_count
* sizeof(*dev->maplist),
DRM_MEM_MAPS );
} else {
dev->map_count = 1;
dev->maplist = DRM(alloc)( dev->map_count*sizeof(*dev->maplist),
DRM_MEM_MAPS );
list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
if(!list) {
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
dev->maplist[dev->map_count-1] = map;
up( &dev->struct_sem );
memset(list, 0, sizeof(*list));
list->map = map;
down(&dev->struct_sem);
list_add(&list->head, &dev->maplist->head);
up(&dev->struct_sem);
if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
return -EFAULT;
@ -164,6 +166,84 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return 0;
}
/* Remove a private map from the list and deallocate resources if the mapping
* isn't in use.
*/
int DRM(rmmap)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
struct list_head *list;
drm_map_list_t *r_list;
drm_vma_entry_t *pt, *prev;
drm_map_t *map;
drm_map_t request;
int found_maps = 0;
if (copy_from_user(&request, (drm_map_t *)arg,
sizeof(request))) {
return -EFAULT;
}
down(&dev->struct_sem);
list = &dev->maplist->head;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
if(r_list->map &&
r_list->map->handle == request.handle &&
r_list->map->flags & _DRM_REMOVABLE) break;
}
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
if(list == (&dev->maplist->head)) {
up(&dev->struct_sem);
return -EINVAL;
}
map = r_list->map;
list_del(list);
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
#else
if (pt->vma->vm_pte == map) found_maps++;
#endif
}
if(!found_maps) {
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef __REALLY_HAVE_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
DRM(ioremapfree)(map->handle, map->size);
break;
case _DRM_SHM:
vfree(map->handle);
break;
case _DRM_AGP:
break;
}
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
}
up(&dev->struct_sem);
return 0;
}
#if __HAVE_DMA
#if __REALLY_HAVE_AGP

View File

@ -41,9 +41,13 @@
void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
{
if ( ctx_handle < 0 ) goto failed;
if ( !dev->ctx_bitmap ) goto failed;
if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
down(&dev->struct_sem);
clear_bit( ctx_handle, dev->ctx_bitmap );
dev->context_sareas[ctx_handle] = NULL;
up(&dev->struct_sem);
return;
}
failed:
@ -56,12 +60,37 @@ int DRM(ctxbitmap_next)( drm_device_t *dev )
{
int bit;
if(!dev->ctx_bitmap) return -1;
down(&dev->struct_sem);
bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
if ( bit < DRM_MAX_CTXBITMAP ) {
set_bit( bit, dev->ctx_bitmap );
DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
if((bit+1) > dev->max_context) {
dev->max_context = (bit+1);
if(dev->context_sareas) {
dev->context_sareas = DRM(realloc)(
dev->context_sareas,
(dev->max_context - 1) *
sizeof(*dev->context_sareas),
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
dev->context_sareas[bit] = NULL;
} else {
/* max_context == 1 at this point */
dev->context_sareas = DRM(alloc)(
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
dev->context_sareas[bit] = NULL;
}
}
up(&dev->struct_sem);
return bit;
}
up(&dev->struct_sem);
return -1;
}
@ -70,12 +99,18 @@ int DRM(ctxbitmap_init)( drm_device_t *dev )
int i;
int temp;
down(&dev->struct_sem);
dev->ctx_bitmap = (unsigned long *) DRM(alloc)( PAGE_SIZE,
DRM_MEM_CTXBITMAP );
if ( dev->ctx_bitmap == NULL ) {
up(&dev->struct_sem);
return -ENOMEM;
}
memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
dev->context_sareas = NULL;
dev->max_context = -1;
up(&dev->struct_sem);
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
temp = DRM(ctxbitmap_next)( dev );
DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
@ -86,9 +121,86 @@ int DRM(ctxbitmap_init)( drm_device_t *dev )
void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
{
down(&dev->struct_sem);
if( dev->context_sareas ) DRM(free)( dev->context_sareas,
sizeof(*dev->context_sareas) *
dev->max_context,
DRM_MEM_MAPS );
DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
up(&dev->struct_sem);
}
/* ================================================================
* Per Context SAREA Support
*/
int DRM(getsareactx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map;
if (copy_from_user(&request,
(drm_ctx_priv_map_t *)arg,
sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
if ((int)request.ctx_id >= dev->max_context) {
up(&dev->struct_sem);
return -EINVAL;
}
map = dev->context_sareas[request.ctx_id];
up(&dev->struct_sem);
request.handle = map->handle;
if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return 0;
}
int DRM(setsareactx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
struct list_head *list;
if (copy_from_user(&request,
(drm_ctx_priv_map_t *)arg,
sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
if(r_list->map &&
r_list->map->handle == request.handle) break;
}
if (list == &(dev->maplist->head)) {
up(&dev->struct_sem);
return -EINVAL;
}
map = r_list->map;
up(&dev->struct_sem);
if (!map) return -EINVAL;
down(&dev->struct_sem);
if ((int)request.ctx_id >= dev->max_context) {
up(&dev->struct_sem);
return -EINVAL;
}
dev->context_sareas[request.ctx_id] = map;
up(&dev->struct_sem);
return 0;
}
/* ================================================================
* The actual DRM context handling routines

View File

@ -133,6 +133,10 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
@ -262,8 +266,14 @@ static int DRM(setup)( drm_device_t *dev )
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
DRM_MEM_MAPS);
if(dev->maplist == NULL) return -ENOMEM;
memset(dev->maplist, 0, sizeof(*dev->maplist));
INIT_LIST_HEAD(&dev->maplist->head);
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
init_waitqueue_head( &dev->lock.lock_queue );
@ -307,6 +317,8 @@ static int DRM(takedown)( drm_device_t *dev )
{
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
drm_vma_entry_t *vma, *vma_next;
int i;
@ -373,10 +385,13 @@ static int DRM(takedown)( drm_device_t *dev )
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if ( dev->maplist ) {
for ( i = 0 ; i < dev->map_count ; i++ ) {
map = dev->maplist[i];
if( dev->maplist ) {
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
if(!map) continue;
switch ( map->type ) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
@ -392,24 +407,19 @@ static int DRM(takedown)( drm_device_t *dev )
DRM(ioremapfree)( map->handle, map->size );
break;
case _DRM_SHM:
DRM(free_pages)( (unsigned long)map->handle,
DRM(order)( map->size )
- PAGE_SHIFT,
DRM_MEM_SAREA );
vfree(map->handle);
break;
case _DRM_AGP:
/* Do nothing here, because this is all
* handled in the AGP/GART driver.
*/
break;
}
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
}
DRM(free)( dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS );
DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES

View File

@ -105,22 +105,40 @@ int DRM(getmap)( struct inode *inode, struct file *filp,
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t map;
drm_map_list_t *r_list = NULL;
struct list_head *list;
int idx;
int i;
if (copy_from_user(&map, (drm_map_t *)arg, sizeof(map)))
return -EFAULT;
idx = map.offset;
down(&dev->struct_sem);
if (idx < 0 || idx >= dev->map_count) {
up(&dev->struct_sem);
return -EINVAL;
}
map.offset = dev->maplist[idx]->offset;
map.size = dev->maplist[idx]->size;
map.type = dev->maplist[idx]->type;
map.flags = dev->maplist[idx]->flags;
map.handle = dev->maplist[idx]->handle;
map.mtrr = dev->maplist[idx]->mtrr;
i = 0;
list_for_each(list, &dev->maplist->head) {
if(i == idx) {
r_list = (drm_map_list_t *)list;
break;
}
i++;
}
if(!r_list || !r_list->map) {
up(&dev->struct_sem);
return -EINVAL;
}
map.offset = r_list->map->offset;
map.size = r_list->map->size;
map.type = r_list->map->type;
map.flags = r_list->map->flags;
map.handle = r_list->map->handle;
map.mtrr = r_list->map->mtrr;
up(&dev->struct_sem);
if (copy_to_user((drm_map_t *)arg, &map, sizeof(map))) return -EFAULT;

View File

@ -165,6 +165,9 @@ static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, and
_DRM_AGP. */
@ -182,8 +185,11 @@ static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
i = 0;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
if(!map) continue;
if (map->type < 0 || map->type > 3) type = "??";
else type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
@ -198,6 +204,7 @@ static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
} else {
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
i++;
}
if (len > request + offset) return request;

View File

@ -41,13 +41,7 @@ struct vm_operations_struct drm_vm_ops = {
struct vm_operations_struct drm_vm_shm_ops = {
nopage: DRM(vm_shm_nopage),
open: DRM(vm_open),
close: DRM(vm_close),
};
struct vm_operations_struct drm_vm_shm_lock_ops = {
nopage: DRM(vm_shm_nopage_lock),
open: DRM(vm_open),
close: DRM(vm_close),
close: DRM(vm_shm_close),
};
struct vm_operations_struct drm_vm_dma_ops = {
@ -88,12 +82,26 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
#endif
unsigned long physical;
unsigned long offset;
unsigned long i;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map) return NOPAGE_OOM; /* Nothing allocated */
offset = address - vma->vm_start;
physical = (unsigned long)map->handle + offset;
i = (unsigned long)map->handle + offset;
/* We have to walk page tables here because we need large SAREAs, and
* they need to be virtually contiguous in kernel space.
*/
pgd = pgd_offset_k( i );
if( !pgd_present( *pgd ) ) return NOPAGE_OOM;
pmd = pmd_offset( pgd, i );
if( !pmd_present( *pmd ) ) return NOPAGE_OOM;
pte = pte_offset( pmd, i );
if( !pte_present( *pte ) ) return NOPAGE_OOM;
physical = (unsigned long)pte_page( *pte )->virtual;
atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx => 0x%08lx\n", address, physical);
@ -104,37 +112,87 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
#endif
}
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_shm_nopage_lock)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_shm_nopage_lock)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
#endif
/* Special close routine which deletes map information if we are the last
* person to close a mapping and it's not in the global maplist.
*/
void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
unsigned long physical;
unsigned long offset;
unsigned long page;
drm_vma_entry_t *pt, *prev;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
int found_maps = 0;
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
if (!dev->lock.hw_lock) return NOPAGE_OOM; /* Nothing allocated */
offset = address - vma->vm_start;
page = offset >> PAGE_SHIFT;
physical = (unsigned long)dev->lock.hw_lock + offset;
atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
return physical;
#else
return virt_to_page(physical);
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
#if LINUX_VERSION_CODE < 0x020333
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_dec(&dev->vma_count);
#if LINUX_VERSION_CODE >= 0x020300
map = vma->vm_private_data;
#else
map = vma->vm_pte;
#endif
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
#else
if (pt->vma->vm_pte == map) found_maps++;
#endif
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
}
}
/* We were the only mapping of this map that was found */
if(found_maps == 1 &&
map->flags & _DRM_REMOVABLE) {
/* Check to see if we are in the maplist; if we are not, then
* we delete this mapping's information.
*/
found_maps = 0;
list = &dev->maplist->head;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
if (r_list->map == map) found_maps++;
}
if(!found_maps) {
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef __REALLY_HAVE_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
DRM(ioremapfree)(map->handle, map->size);
break;
case _DRM_SHM:
vfree(map->handle);
break;
case _DRM_AGP:
break;
}
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
}
}
up(&dev->struct_sem);
}
#if LINUX_VERSION_CODE < 0x020317
@ -176,9 +234,7 @@ void DRM(vm_open)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *vma_entry;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@ -188,8 +244,6 @@ void DRM(vm_open)(struct vm_area_struct *vma)
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
#if DRM_DEBUG_CODE
vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
down(&dev->struct_sem);
@ -199,16 +253,13 @@ void DRM(vm_open)(struct vm_area_struct *vma)
dev->vmalist = vma_entry;
up(&dev->struct_sem);
}
#endif
}
void DRM(vm_close)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *pt, *prev;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@ -217,7 +268,6 @@ void DRM(vm_close)(struct vm_area_struct *vma)
#endif
atomic_dec(&dev->vma_count);
#if DRM_DEBUG_CODE
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
if (pt->vma == vma) {
@ -231,7 +281,6 @@ void DRM(vm_close)(struct vm_area_struct *vma)
}
}
up(&dev->struct_sem);
#endif
}
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
@ -272,7 +321,8 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map = NULL;
int i;
drm_map_list_t *r_list;
struct list_head *list;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
@ -286,12 +336,13 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
once, so it doesn't have to be optimized
for performance, even if the list was a
bit longer. */
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
if (!map) continue;
if (map->offset == VM_OFFSET(vma)) break;
}
if (i >= dev->map_count) return -EINVAL;
if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
@ -339,17 +390,12 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_SHM:
if (map->flags & _DRM_CONTAINS_LOCK)
vma->vm_ops = &drm_vm_shm_lock_ops;
else {
vma->vm_ops = &drm_vm_shm_ops;
#if LINUX_VERSION_CODE >= 0x020300
vma->vm_private_data = (void *)map;
#else
vma->vm_pte = (unsigned long)map;
#endif
}
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
vma->vm_flags |= VM_LOCKED;

View File

@ -396,23 +396,26 @@ static int i810_dma_initialize(drm_device_t *dev,
drm_i810_private_t *dev_priv,
drm_i810_init_t *init)
{
drm_map_t *sarea_map;
struct list_head *list;
dev->dev_private = (void *) dev_priv;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
if (init->ring_map_idx >= dev->map_count ||
init->buffer_map_idx >= dev->map_count) {
i810_dma_cleanup(dev);
DRM_ERROR("ring_map or buffer_map are invalid\n");
return -EINVAL;
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = (drm_map_list_t *)list;
if( r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK ) {
dev_priv->sarea_map = r_list->map;
break;
}
}
dev_priv->ring_map_idx = init->ring_map_idx;
dev_priv->buffer_map_idx = init->buffer_map_idx;
sarea_map = dev->maplist[0];
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
dev_priv->sarea_priv = (drm_i810_sarea_t *)
((u8 *)sarea_map->handle +
((u8 *)dev_priv->sarea_map->handle +
init->sarea_priv_offset);
atomic_set(&dev_priv->flush_done, 0);
@ -865,6 +868,7 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
void i810_dma_service(int irq, void *device, struct pt_regs *regs)
{
drm_device_t *dev = (drm_device_t *)device;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
u16 temp;
atomic_inc(&dev->counts[_DRM_STAT_IRQ]);

View File

@ -98,8 +98,8 @@ typedef struct _drm_i810_init {
I810_INIT_DMA = 0x01,
I810_CLEANUP_DMA = 0x02
} func;
int ring_map_idx;
int buffer_map_idx;
unsigned int mmio_offset;
unsigned int buffers_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;

View File

@ -54,11 +54,12 @@ typedef struct _drm_i810_ring_buffer{
} drm_i810_ring_buffer_t;
typedef struct drm_i810_private {
int ring_map_idx;
int buffer_map_idx;
drm_map_t *sarea_map;
drm_map_t *buffer_map;
drm_map_t *mmio_map;
drm_i810_ring_buffer_t ring;
drm_i810_sarea_t *sarea_priv;
drm_i810_ring_buffer_t ring;
unsigned long hw_status_page;
unsigned long counter;
@ -108,9 +109,8 @@ int i810_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
#define I810_REG(reg) 2
#define I810_BASE(reg) ((unsigned long) \
dev->maplist[I810_REG(reg)]->handle)
dev_priv->mmio_map->handle)
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)

View File

@ -138,9 +138,15 @@ typedef enum drm_map_flags {
_DRM_LOCKED = 0x04, /* shared, cached, locked */
_DRM_KERNEL = 0x08, /* kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
_DRM_CONTAINS_LOCK = 0x20, /* SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /* Removable mapping */
} drm_map_flags_t;
typedef struct drm_ctx_priv_map {
unsigned int ctx_id; /* Context requesting private mapping */
void *handle; /* Handle of map */
} drm_ctx_priv_map_t;
typedef struct drm_map {
unsigned long offset; /* Requested physical address (0 for SAREA)*/
unsigned long size; /* Requested physical size (bytes) */
@ -365,6 +371,11 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
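
The drm_ctx_priv_map_t structure and the two SAREA_CTX ioctls defined above are what the libdrm wrappers shown earlier ultimately issue. For completeness, a minimal sketch of driving them directly; the function and its error handling are assumptions, and only the structure, the ioctl numbers, and the standard ioctl(2) call are taken as given:

/* Illustrative only: raw-ioctl equivalent of drmAddContextPrivateMapping()
 * followed by drmGetContextPrivateMapping(). */
#include <errno.h>
#include <sys/ioctl.h>
#include "drm.h"        /* for drm_ctx_priv_map_t and the ioctl numbers */

static int set_and_check_sarea_ctx(int fd, unsigned int ctx_id, void *handle)
{
        drm_ctx_priv_map_t map;

        map.ctx_id = ctx_id;
        map.handle = handle;
        if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map)) return -errno;

        map.ctx_id = ctx_id;
        map.handle = NULL;
        if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map)) return -errno;

        return (map.handle == handle) ? 0 : -EINVAL;
}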

View File

@ -73,6 +73,7 @@
#if LINUX_VERSION_CODE < 0x020400
#include "compat-pre24.h"
#endif
#include <asm/pgalloc.h>
#include "drm.h"
/* DRM template customization defaults
@ -329,16 +330,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
DRM(ioremapfree)( (map)->handle, (map)->size ); \
} while (0)
#define DRM_FIND_MAP(map, o) \
do { \
int i; \
for ( i = 0 ; i < dev->map_count ; i++ ) { \
if ( dev->maplist[i]->offset == o ) { \
map = dev->maplist[i]; \
#define DRM_FIND_MAP(_map, _o) \
do { \
struct list_head *_list; \
list_for_each(_list, &dev->maplist->head) { \
drm_map_list_t *_r_list; \
_r_list = (drm_map_list_t *)_list; \
if(_r_list->map && \
_r_list->map->offset == (_o)) { \
(_map) = _r_list->map; \
break; \
} \
} \
} while (0)
} while(0)
/* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
@ -349,6 +353,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
(_map) = (_dev)->context_sareas[_ctx]; \
} while(0)
typedef int drm_ioctl_t( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
@ -577,6 +585,11 @@ typedef struct drm_sigdata {
drm_hw_lock_t *lock;
} drm_sigdata_t;
typedef struct drm_map_list {
struct list_head head;
drm_map_t *map;
} drm_map_list_t;
typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
@ -609,9 +622,12 @@ typedef struct drm_device {
drm_magic_head_t magiclist[DRM_HASH_SIZE];
/* Memory management */
drm_map_t **maplist; /* Vector of pointers to regions */
drm_map_list_t *maplist; /* Linked list of regions */
int map_count; /* Number of mappable regions */
drm_map_t **context_sareas;
int max_context;
drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
drm_lock_data_t lock; /* Information on hardware lock */
@ -700,9 +716,6 @@ extern unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
extern unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long DRM(vm_shm_nopage_lock)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
@ -714,15 +727,13 @@ extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern struct page *DRM(vm_shm_nopage_lock)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
#endif
extern void DRM(vm_open)(struct vm_area_struct *vma);
extern void DRM(vm_close)(struct vm_area_struct *vma);
extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
extern int DRM(mmap_dma)(struct file *filp,
struct vm_area_struct *vma);
extern int DRM(mmap)(struct file *filp, struct vm_area_struct *vma);
@ -788,6 +799,11 @@ extern int DRM(ctxbitmap_init)( drm_device_t *dev );
extern void DRM(ctxbitmap_cleanup)( drm_device_t *dev );
#endif
extern int DRM(setsareactx)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int DRM(getsareactx)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
/* Drawable IOCTL support (drm_drawable.h) */
extern int DRM(adddraw)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@ -830,6 +846,8 @@ extern int DRM(notifier)(void *priv);
extern int DRM(order)( unsigned long size );
extern int DRM(addmap)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int DRM(rmmap)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
#if __HAVE_DMA
extern int DRM(addbufs)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );

View File

@ -69,6 +69,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map;
drm_map_list_t *list;
if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
@ -81,6 +82,14 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return -EFAULT;
}
/* Only allow shared memory to be removable since we only keep enough
* bookkeeping information about shared memory to allow for removal
* when processes fork.
*/
if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
return -EINVAL;
}
DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
map->offset, map->size, map->type );
if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
@ -100,7 +109,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return -EINVAL;
}
#endif
#ifdef CONFIG_MTRR
#ifdef __REALLY_HAVE_MTRR
if ( map->type == _DRM_FRAME_BUFFER ||
(map->flags & _DRM_WRITE_COMBINING) ) {
map->mtrr = mtrr_add( map->offset, map->size,
@ -111,9 +120,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
break;
case _DRM_SHM:
map->handle = (void *)DRM(alloc_pages)( DRM(order)( map->size )
- PAGE_SHIFT,
DRM_MEM_SAREA );
map->handle = vmalloc_32(map->size);
DRM_DEBUG( "%ld %d %p\n",
map->size, DRM(order)( map->size ), map->handle );
if ( !map->handle ) {
@ -136,22 +143,17 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return -EINVAL;
}
down( &dev->struct_sem );
if ( dev->maplist ) {
++dev->map_count;
dev->maplist = DRM(realloc)( dev->maplist,
(dev->map_count-1)
* sizeof(*dev->maplist),
dev->map_count
* sizeof(*dev->maplist),
DRM_MEM_MAPS );
} else {
dev->map_count = 1;
dev->maplist = DRM(alloc)( dev->map_count*sizeof(*dev->maplist),
DRM_MEM_MAPS );
list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
if(!list) {
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
dev->maplist[dev->map_count-1] = map;
up( &dev->struct_sem );
memset(list, 0, sizeof(*list));
list->map = map;
down(&dev->struct_sem);
list_add(&list->head, &dev->maplist->head);
up(&dev->struct_sem);
if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
return -EFAULT;
@ -164,6 +166,84 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return 0;
}
/* Remove a private map from the list and deallocate resources if the mapping
* isn't in use.
*/
int DRM(rmmap)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
struct list_head *list;
drm_map_list_t *r_list;
drm_vma_entry_t *pt, *prev;
drm_map_t *map;
drm_map_t request;
int found_maps = 0;
if (copy_from_user(&request, (drm_map_t *)arg,
sizeof(request))) {
return -EFAULT;
}
down(&dev->struct_sem);
list = &dev->maplist->head;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
if(r_list->map &&
r_list->map->handle == request.handle &&
r_list->map->flags & _DRM_REMOVABLE) break;
}
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
if(list == (&dev->maplist->head)) {
up(&dev->struct_sem);
return -EINVAL;
}
map = r_list->map;
list_del(list);
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
#else
if (pt->vma->vm_pte == map) found_maps++;
#endif
}
if(!found_maps) {
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef __REALLY_HAVE_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
DRM(ioremapfree)(map->handle, map->size);
break;
case _DRM_SHM:
vfree(map->handle);
break;
case _DRM_AGP:
break;
}
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
}
up(&dev->struct_sem);
return 0;
}
#if __HAVE_DMA
#if __REALLY_HAVE_AGP

View File

@ -41,9 +41,13 @@
void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
{
if ( ctx_handle < 0 ) goto failed;
if ( !dev->ctx_bitmap ) goto failed;
if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
down(&dev->struct_sem);
clear_bit( ctx_handle, dev->ctx_bitmap );
dev->context_sareas[ctx_handle] = NULL;
up(&dev->struct_sem);
return;
}
failed:
@ -56,12 +60,37 @@ int DRM(ctxbitmap_next)( drm_device_t *dev )
{
int bit;
if(!dev->ctx_bitmap) return -1;
down(&dev->struct_sem);
bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
if ( bit < DRM_MAX_CTXBITMAP ) {
set_bit( bit, dev->ctx_bitmap );
DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
if((bit+1) > dev->max_context) {
dev->max_context = (bit+1);
if(dev->context_sareas) {
dev->context_sareas = DRM(realloc)(
dev->context_sareas,
(dev->max_context - 1) *
sizeof(*dev->context_sareas),
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
dev->context_sareas[bit] = NULL;
} else {
/* max_context == 1 at this point */
dev->context_sareas = DRM(alloc)(
dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
dev->context_sareas[bit] = NULL;
}
}
up(&dev->struct_sem);
return bit;
}
up(&dev->struct_sem);
return -1;
}
@ -70,12 +99,18 @@ int DRM(ctxbitmap_init)( drm_device_t *dev )
int i;
int temp;
down(&dev->struct_sem);
dev->ctx_bitmap = (unsigned long *) DRM(alloc)( PAGE_SIZE,
DRM_MEM_CTXBITMAP );
if ( dev->ctx_bitmap == NULL ) {
up(&dev->struct_sem);
return -ENOMEM;
}
memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
dev->context_sareas = NULL;
dev->max_context = -1;
up(&dev->struct_sem);
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
temp = DRM(ctxbitmap_next)( dev );
DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
@ -86,9 +121,86 @@ int DRM(ctxbitmap_init)( drm_device_t *dev )
void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
{
down(&dev->struct_sem);
if( dev->context_sareas ) DRM(free)( dev->context_sareas,
sizeof(*dev->context_sareas) *
dev->max_context,
DRM_MEM_MAPS );
DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
up(&dev->struct_sem);
}
/* ================================================================
* Per Context SAREA Support
*/
int DRM(getsareactx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map;
if (copy_from_user(&request,
(drm_ctx_priv_map_t *)arg,
sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
if ((int)request.ctx_id >= dev->max_context) {
up(&dev->struct_sem);
return -EINVAL;
}
map = dev->context_sareas[request.ctx_id];
up(&dev->struct_sem);
request.handle = map->handle;
if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return 0;
}
int DRM(setsareactx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
struct list_head *list;
if (copy_from_user(&request,
(drm_ctx_priv_map_t *)arg,
sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
if(r_list->map &&
r_list->map->handle == request.handle) break;
}
if (list == &(dev->maplist->head)) {
up(&dev->struct_sem);
return -EINVAL;
}
map = r_list->map;
up(&dev->struct_sem);
if (!map) return -EINVAL;
down(&dev->struct_sem);
if ((int)request.ctx_id >= dev->max_context) {
up(&dev->struct_sem);
return -EINVAL;
}
dev->context_sareas[request.ctx_id] = map;
up(&dev->struct_sem);
return 0;
}
/* ================================================================
* The actual DRM context handling routines

View File

@ -133,6 +133,10 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
@ -262,8 +266,14 @@ static int DRM(setup)( drm_device_t *dev )
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
DRM_MEM_MAPS);
if(dev->maplist == NULL) return -ENOMEM;
memset(dev->maplist, 0, sizeof(*dev->maplist));
INIT_LIST_HEAD(&dev->maplist->head);
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
init_waitqueue_head( &dev->lock.lock_queue );
@ -307,6 +317,8 @@ static int DRM(takedown)( drm_device_t *dev )
{
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
drm_vma_entry_t *vma, *vma_next;
int i;
@ -373,10 +385,13 @@ static int DRM(takedown)( drm_device_t *dev )
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if ( dev->maplist ) {
for ( i = 0 ; i < dev->map_count ; i++ ) {
map = dev->maplist[i];
if( dev->maplist ) {
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
if(!map) continue;
switch ( map->type ) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
@ -392,24 +407,19 @@ static int DRM(takedown)( drm_device_t *dev )
DRM(ioremapfree)( map->handle, map->size );
break;
case _DRM_SHM:
DRM(free_pages)( (unsigned long)map->handle,
DRM(order)( map->size )
- PAGE_SHIFT,
DRM_MEM_SAREA );
vfree(map->handle);
break;
case _DRM_AGP:
/* Do nothing here, because this is all
* handled in the AGP/GART driver.
*/
break;
}
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
}
DRM(free)( dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS );
DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES

View File

@ -105,22 +105,40 @@ int DRM(getmap)( struct inode *inode, struct file *filp,
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t map;
drm_map_list_t *r_list = NULL;
struct list_head *list;
int idx;
int i;
if (copy_from_user(&map, (drm_map_t *)arg, sizeof(map)))
return -EFAULT;
idx = map.offset;
down(&dev->struct_sem);
if (idx < 0 || idx >= dev->map_count) {
up(&dev->struct_sem);
return -EINVAL;
}
map.offset = dev->maplist[idx]->offset;
map.size = dev->maplist[idx]->size;
map.type = dev->maplist[idx]->type;
map.flags = dev->maplist[idx]->flags;
map.handle = dev->maplist[idx]->handle;
map.mtrr = dev->maplist[idx]->mtrr;
i = 0;
list_for_each(list, &dev->maplist->head) {
if(i == idx) {
r_list = (drm_map_list_t *)list;
break;
}
i++;
}
if(!r_list || !r_list->map) {
up(&dev->struct_sem);
return -EINVAL;
}
map.offset = r_list->map->offset;
map.size = r_list->map->size;
map.type = r_list->map->type;
map.flags = r_list->map->flags;
map.handle = r_list->map->handle;
map.mtrr = r_list->map->mtrr;
up(&dev->struct_sem);
if (copy_to_user((drm_map_t *)arg, &map, sizeof(map))) return -EFAULT;

View File

@ -165,6 +165,9 @@ static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, and
_DRM_AGP. */
@ -182,8 +185,11 @@ static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
i = 0;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
if(!map) continue;
if (map->type < 0 || map->type > 3) type = "??";
else type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
@ -198,6 +204,7 @@ static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
} else {
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
i++;
}
if (len > request + offset) return request;

View File

@ -41,13 +41,7 @@ struct vm_operations_struct drm_vm_ops = {
struct vm_operations_struct drm_vm_shm_ops = {
nopage: DRM(vm_shm_nopage),
open: DRM(vm_open),
close: DRM(vm_close),
};
struct vm_operations_struct drm_vm_shm_lock_ops = {
nopage: DRM(vm_shm_nopage_lock),
open: DRM(vm_open),
close: DRM(vm_close),
close: DRM(vm_shm_close),
};
struct vm_operations_struct drm_vm_dma_ops = {
@ -88,12 +82,26 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
#endif
unsigned long physical;
unsigned long offset;
unsigned long i;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map) return NOPAGE_OOM; /* Nothing allocated */
offset = address - vma->vm_start;
physical = (unsigned long)map->handle + offset;
i = (unsigned long)map->handle + offset;
/* We have to walk page tables here because we need large SAREAs, and
* they need to be virtually contiguous in kernel space.
*/
pgd = pgd_offset_k( i );
if( !pgd_present( *pgd ) ) return NOPAGE_OOM;
pmd = pmd_offset( pgd, i );
if( !pmd_present( *pmd ) ) return NOPAGE_OOM;
pte = pte_offset( pmd, i );
if( !pte_present( *pte ) ) return NOPAGE_OOM;
physical = (unsigned long)pte_page( *pte )->virtual;
atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx => 0x%08lx\n", address, physical);
@ -104,37 +112,87 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
#endif
}
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_shm_nopage_lock)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_shm_nopage_lock)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
#endif
/* Special close routine which deletes map information if we are the last
* person to close a mapping and it's not in the global maplist.
*/
void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
unsigned long physical;
unsigned long offset;
unsigned long page;
drm_vma_entry_t *pt, *prev;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
int found_maps = 0;
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
if (!dev->lock.hw_lock) return NOPAGE_OOM; /* Nothing allocated */
offset = address - vma->vm_start;
page = offset >> PAGE_SHIFT;
physical = (unsigned long)dev->lock.hw_lock + offset;
atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
return physical;
#else
return virt_to_page(physical);
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
#if LINUX_VERSION_CODE < 0x020333
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_dec(&dev->vma_count);
#if LINUX_VERSION_CODE >= 0x020300
map = vma->vm_private_data;
#else
map = vma->vm_pte;
#endif
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
#else
if (pt->vma->vm_pte == map) found_maps++;
#endif
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
}
}
/* We were the only mapping of this map that was found */
if(found_maps == 1 &&
map->flags & _DRM_REMOVABLE) {
/* Check to see if we are in the maplist; if we are not, then
* we delete this mapping's information.
*/
found_maps = 0;
list = &dev->maplist->head;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
if (r_list->map == map) found_maps++;
}
if(!found_maps) {
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef __REALLY_HAVE_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
DRM(ioremapfree)(map->handle, map->size);
break;
case _DRM_SHM:
vfree(map->handle);
break;
case _DRM_AGP:
break;
}
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
}
}
up(&dev->struct_sem);
}
#if LINUX_VERSION_CODE < 0x020317
@ -176,9 +234,7 @@ void DRM(vm_open)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *vma_entry;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@ -188,8 +244,6 @@ void DRM(vm_open)(struct vm_area_struct *vma)
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
#if DRM_DEBUG_CODE
vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
down(&dev->struct_sem);
@ -199,16 +253,13 @@ void DRM(vm_open)(struct vm_area_struct *vma)
dev->vmalist = vma_entry;
up(&dev->struct_sem);
}
#endif
}
void DRM(vm_close)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *pt, *prev;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@ -217,7 +268,6 @@ void DRM(vm_close)(struct vm_area_struct *vma)
#endif
atomic_dec(&dev->vma_count);
#if DRM_DEBUG_CODE
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
if (pt->vma == vma) {
@ -231,7 +281,6 @@ void DRM(vm_close)(struct vm_area_struct *vma)
}
}
up(&dev->struct_sem);
#endif
}
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
@ -272,7 +321,8 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map = NULL;
int i;
drm_map_list_t *r_list;
struct list_head *list;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
@ -286,12 +336,13 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
once, so it doesn't have to be optimized
for performance, even if the list was a
bit longer. */
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
if (!map) continue;
if (map->offset == VM_OFFSET(vma)) break;
}
if (i >= dev->map_count) return -EINVAL;
if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
@ -339,17 +390,12 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_SHM:
if (map->flags & _DRM_CONTAINS_LOCK)
vma->vm_ops = &drm_vm_shm_lock_ops;
else {
vma->vm_ops = &drm_vm_shm_ops;
#if LINUX_VERSION_CODE >= 0x020300
vma->vm_private_data = (void *)map;
#else
vma->vm_pte = (unsigned long)map;
#endif
}
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
vma->vm_flags |= VM_LOCKED;

View File

@ -68,17 +68,23 @@
#define __HAVE_DMA_IRQ 1
#define __HAVE_DMA_IRQ_BH 1
#define DRIVER_PREINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000000 ); \
GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); \
} while (0)
#define DRIVER_POSTINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); \
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); \
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); \
} while (0)
#define DRIVER_UNINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); \
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); \
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); \

View File

@ -41,6 +41,9 @@
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
unsigned long length)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address));
while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
;
@ -49,6 +52,9 @@ static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
void gamma_dma_quiescent_single(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
;
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
@ -65,6 +71,9 @@ void gamma_dma_quiescent_single(drm_device_t *dev)
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
;
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
@ -90,12 +99,18 @@ void gamma_dma_quiescent_dual(drm_device_t *dev)
void gamma_dma_ready(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
;
}
static inline int gamma_dma_is_ready(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
return !GAMMA_READ(GAMMA_DMACOUNT);
}
@ -103,6 +118,8 @@ void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
{
drm_device_t *dev = (drm_device_t *)device;
drm_device_dma_t *dma = dev->dma;
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */

View File

@ -35,6 +35,10 @@
typedef struct drm_gamma_private {
drm_map_t *buffers;
drm_map_t *mmio0;
drm_map_t *mmio1;
drm_map_t *mmio2;
drm_map_t *mmio3;
} drm_gamma_private_t;
#define LOCK_TEST_WITH_RETURN( dev ) \
@ -60,16 +64,6 @@ extern int gamma_find_devices(void);
extern int gamma_found(void);
/* WARNING!!! MAGIC NUMBER!!! The number of regions already added to the
kernel must be specified here. Currently, the number is 2. This must
match the order the X server uses for instantiating register regions,
or must be passed in a new ioctl. */
#define GAMMA_REG(reg) \
(2 \
+ ((reg < 0x1000) \
? 0 \
: ((reg < 0x10000) ? 1 : ((reg < 0x11000) ? 2 : 3))))
#define GAMMA_OFF(reg) \
((reg < 0x1000) \
? reg \
@ -79,7 +73,12 @@ extern int gamma_found(void);
? (reg - 0x10000) \
: (reg - 0x11000))))
#define GAMMA_BASE(reg) ((unsigned long)dev->maplist[GAMMA_REG(reg)]->handle)
#define GAMMA_BASE(reg) ((unsigned long) \
((reg < 0x1000) ? dev_priv->mmio0->handle : \
((reg < 0x10000) ? dev_priv->mmio1->handle : \
((reg < 0x11000) ? dev_priv->mmio2->handle : \
dev_priv->mmio3->handle))))
#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
#define GAMMA_READ(reg) GAMMA_DEREF(reg)

View File

@ -63,6 +63,8 @@
#define __HAVE_DMA_IRQ 1
#define __HAVE_DMA_IRQ_BH 1
#define DRIVER_PREINSTALL() do { \
drm_i810_private_t *dev_priv = \
(drm_i810_private_t *)dev->dev_private; \
u16 tmp; \
tmp = I810_READ16( I810REG_HWSTAM ); \
tmp = tmp & 0x6000; \
@ -77,6 +79,8 @@
} while (0)
#define DRIVER_POSTINSTALL() do { \
drm_i810_private_t *dev_priv = \
(drm_i810_private_t *)dev->dev_private; \
u16 tmp; \
tmp = I810_READ16( I810REG_INT_ENABLE_R ); \
tmp = tmp & 0x6000; \
@ -85,6 +89,8 @@
} while (0)
#define DRIVER_UNINSTALL() do { \
drm_i810_private_t *dev_priv = \
(drm_i810_private_t *)dev->dev_private; \
u16 tmp; \
tmp = I810_READ16( I810REG_INT_IDENTITY_R ); \
tmp = tmp & ~(0x6000); /* Clear all interrupts */ \
@ -101,10 +107,6 @@
#define DRIVER_BUF_PRIV_T drm_i810_buf_priv_t
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
({ \
drm_i810_private_t *dev_priv = (dev)->dev_private; \
drm_map_t *map = (dev)->maplist[dev_priv->buffer_map_idx]; \
map; \
})
((drm_i810_private_t *)((dev)->dev_private))->buffer_map
#endif

View File

@ -396,23 +396,26 @@ static int i810_dma_initialize(drm_device_t *dev,
drm_i810_private_t *dev_priv,
drm_i810_init_t *init)
{
drm_map_t *sarea_map;
struct list_head *list;
dev->dev_private = (void *) dev_priv;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
if (init->ring_map_idx >= dev->map_count ||
init->buffer_map_idx >= dev->map_count) {
i810_dma_cleanup(dev);
DRM_ERROR("ring_map or buffer_map are invalid\n");
return -EINVAL;
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = (drm_map_list_t *)list;
if( r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK ) {
dev_priv->sarea_map = r_list->map;
break;
}
}
dev_priv->ring_map_idx = init->ring_map_idx;
dev_priv->buffer_map_idx = init->buffer_map_idx;
sarea_map = dev->maplist[0];
DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
dev_priv->sarea_priv = (drm_i810_sarea_t *)
((u8 *)sarea_map->handle +
((u8 *)dev_priv->sarea_map->handle +
init->sarea_priv_offset);
atomic_set(&dev_priv->flush_done, 0);
@ -865,6 +868,7 @@ static void i810_dma_dispatch_vertex(drm_device_t *dev,
void i810_dma_service(int irq, void *device, struct pt_regs *regs)
{
drm_device_t *dev = (drm_device_t *)device;
drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
u16 temp;
atomic_inc(&dev->counts[_DRM_STAT_IRQ]);

View File

@ -98,8 +98,8 @@ typedef struct _drm_i810_init {
I810_INIT_DMA = 0x01,
I810_CLEANUP_DMA = 0x02
} func;
int ring_map_idx;
int buffer_map_idx;
unsigned int mmio_offset;
unsigned int buffers_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;

View File

@ -54,11 +54,12 @@ typedef struct _drm_i810_ring_buffer{
} drm_i810_ring_buffer_t;
typedef struct drm_i810_private {
int ring_map_idx;
int buffer_map_idx;
drm_map_t *sarea_map;
drm_map_t *buffer_map;
drm_map_t *mmio_map;
drm_i810_ring_buffer_t ring;
drm_i810_sarea_t *sarea_priv;
drm_i810_ring_buffer_t ring;
unsigned long hw_status_page;
unsigned long counter;
@ -108,9 +109,8 @@ int i810_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
#define I810_REG(reg) 2
#define I810_BASE(reg) ((unsigned long) \
dev->maplist[I810_REG(reg)]->handle)
dev_priv->mmio_map->handle)
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)

View File

@ -436,6 +436,7 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
{
drm_mga_private_t *dev_priv;
struct list_head *list;
int ret;
DRM_DEBUG( "%s\n", __FUNCTION__ );
@ -467,7 +468,15 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch;
dev_priv->sarea = dev->maplist[0];
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = (drm_map_list_t *)list;
if( r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK ) {
dev_priv->sarea = r_list->map;
break;
}
}
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );

View File

@ -344,7 +344,7 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev )
static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
{
drm_r128_private_t *dev_priv;
int i;
struct list_head *list;
dev_priv = DRM(alloc)( sizeof(drm_r128_private_t), DRM_MEM_DRIVER );
if ( dev_priv == NULL )
@ -451,12 +451,12 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) |
(dev_priv->span_offset >> 5));
/* FIXME: We want multiple shared areas, including one shared
* only by the X Server and kernel module.
*/
for ( i = 0 ; i < dev->map_count ; i++ ) {
if ( dev->maplist[i]->type == _DRM_SHM ) {
dev_priv->sarea = dev->maplist[i];
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = (drm_map_list_t *)list;
if( r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK ) {
dev_priv->sarea = r_list->map;
break;
}
}

View File

@ -601,7 +601,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev )
static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
{
drm_radeon_private_t *dev_priv;
int i;
struct list_head *list;
dev_priv = DRM(alloc)( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER );
if ( dev_priv == NULL )
@ -710,12 +710,12 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
RADEON_ROUND_MODE_TRUNC |
RADEON_ROUND_PREC_8TH_PIX);
/* FIXME: We want multiple shared areas, including one shared
* only by the X Server and kernel module.
*/
for ( i = 0 ; i < dev->map_count ; i++ ) {
if ( dev->maplist[i]->type == _DRM_SHM ) {
dev_priv->sarea = dev->maplist[i];
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = (drm_map_list_t *)list;
if( r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK ) {
dev_priv->sarea = r_list->map;
break;
}
}

View File

@ -138,9 +138,15 @@ typedef enum drm_map_flags {
_DRM_LOCKED = 0x04, /* shared, cached, locked */
_DRM_KERNEL = 0x08, /* kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
_DRM_CONTAINS_LOCK = 0x20, /* SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /* Removable mapping */
} drm_map_flags_t;
typedef struct drm_ctx_priv_map {
unsigned int ctx_id; /* Context requesting private mapping */
void *handle; /* Handle of map */
} drm_ctx_priv_map_t;
typedef struct drm_map {
unsigned long offset; /* Requested physical address (0 for SAREA)*/
unsigned long size; /* Requested physical size (bytes) */
@ -365,6 +371,11 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)

View File

@ -138,9 +138,15 @@ typedef enum drm_map_flags {
_DRM_LOCKED = 0x04, /* shared, cached, locked */
_DRM_KERNEL = 0x08, /* kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
_DRM_CONTAINS_LOCK = 0x20, /* SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /* Removable mapping */
} drm_map_flags_t;
typedef struct drm_ctx_priv_map {
unsigned int ctx_id; /* Context requesting private mapping */
void *handle; /* Handle of map */
} drm_ctx_priv_map_t;
typedef struct drm_map {
unsigned long offset; /* Requested physical address (0 for SAREA)*/
unsigned long size; /* Requested physical size (bytes) */
@ -365,6 +371,11 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)